; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=kaveri -enable-ipra=0 -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GCN,CIVI %s
; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -enable-ipra=0 -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GCN,GFX9 %s
Matt Arsenault | 8623e8d | 2017-08-03 23:00:29 +0000 | [diff] [blame] | 3 | |
; Callee that reads the dispatch packet: per the CHECK below, a non-kernel
; function receives the dispatch pointer in SGPR pair s[6:7].
; GCN-LABEL: {{^}}use_dispatch_ptr:
; GCN: s_load_dword s{{[0-9]+}}, s[6:7], 0x0
define void @use_dispatch_ptr() #1 {
  %dispatch_ptr = call noalias i8 addrspace(4)* @llvm.amdgcn.dispatch.ptr() #0
  %header_ptr = bitcast i8 addrspace(4)* %dispatch_ptr to i32 addrspace(4)*
  %value = load volatile i32, i32 addrspace(4)* %header_ptr
  ret void
}
| 12 | |
; Kernel caller: the callee's dispatch-ptr use must force the kernel to
; enable the dispatch-ptr SGPR input and copy it into the callee's s[6:7].
; GCN-LABEL: {{^}}kern_indirect_use_dispatch_ptr:
; GCN: enable_sgpr_dispatch_ptr = 1
; GCN: s_mov_b64 s[6:7], s[4:5]
define amdgpu_kernel void @kern_indirect_use_dispatch_ptr(i32) #1 {
  call void @use_dispatch_ptr()
  ret void
}
| 20 | |
; Callee that reads the queue descriptor: queue pointer arrives in s[6:7].
; GCN-LABEL: {{^}}use_queue_ptr:
; GCN: s_load_dword s{{[0-9]+}}, s[6:7], 0x0
define void @use_queue_ptr() #1 {
  %queue_ptr = call noalias i8 addrspace(4)* @llvm.amdgcn.queue.ptr() #0
  %header_ptr = bitcast i8 addrspace(4)* %queue_ptr to i32 addrspace(4)*
  %value = load volatile i32, i32 addrspace(4)* %header_ptr
  ret void
}
| 29 | |
; Kernel caller of @use_queue_ptr: queue-ptr input is enabled and forwarded
; from the kernel's s[4:5] into the callee's expected s[6:7] before the call.
; GCN-LABEL: {{^}}kern_indirect_use_queue_ptr:
; GCN: enable_sgpr_queue_ptr = 1
; GCN: s_mov_b64 s[6:7], s[4:5]
; GCN: s_swappc_b64
define amdgpu_kernel void @kern_indirect_use_queue_ptr(i32) #1 {
  call void @use_queue_ptr()
  ret void
}
| 38 | |
; local-to-flat addrspacecast needs the shared-memory aperture base:
; CI/VI load it from the queue pointer (offset 0x10); GFX9 reads it via
; s_getreg instead, so no queue pointer is required there.
; GCN-LABEL: {{^}}use_queue_ptr_addrspacecast:
; CIVI: s_load_dword [[APERTURE_LOAD:s[0-9]+]], s[6:7], 0x10
; GFX9: s_getreg_b32 [[APERTURE_LOAD:s[0-9]+]]

; GCN: v_mov_b32_e32 v[[HI:[0-9]+]], [[APERTURE_LOAD]]
; GCN: {{flat|global}}_store_dword v{{\[[0-9]+}}:[[HI]]{{\]}}
define void @use_queue_ptr_addrspacecast() #1 {
  %asc = addrspacecast i32 addrspace(3)* inttoptr (i32 16 to i32 addrspace(3)*) to i32*
  store volatile i32 0, i32* %asc
  ret void
}
| 50 | |
; Only CI/VI kernels must enable and forward the queue pointer for the
; callee's addrspacecast; GFX9 gets the aperture from s_getreg, so no copy.
; GCN-LABEL: {{^}}kern_indirect_use_queue_ptr_addrspacecast:
; CIVI: enable_sgpr_queue_ptr = 1

; CIVI: s_mov_b64 s[6:7], s[4:5]
; GFX9-NOT: s_mov_b64
; GCN: s_swappc_b64
define amdgpu_kernel void @kern_indirect_use_queue_ptr_addrspacecast(i32) #1 {
  call void @use_queue_ptr_addrspacecast()
  ret void
}
| 61 | |
; Callee reading the kernarg segment: the pointer arrives in s[6:7].
; GCN-LABEL: {{^}}use_kernarg_segment_ptr:
; GCN: s_load_dword s{{[0-9]+}}, s[6:7], 0x0
define void @use_kernarg_segment_ptr() #1 {
  %kernarg_segment_ptr = call noalias i8 addrspace(4)* @llvm.amdgcn.kernarg.segment.ptr() #0
  %header_ptr = bitcast i8 addrspace(4)* %kernarg_segment_ptr to i32 addrspace(4)*
  %value = load volatile i32, i32 addrspace(4)* %header_ptr
  ret void
}
| 70 | |
; Kernel caller: kernarg segment pointer is enabled and moved into the
; callee's argument registers s[6:7] before the indirect use.
; GCN-LABEL: {{^}}kern_indirect_use_kernarg_segment_ptr:
; GCN: enable_sgpr_kernarg_segment_ptr = 1
; GCN: s_mov_b64 s[6:7], s[4:5]
; GCN: s_swappc_b64
define amdgpu_kernel void @kern_indirect_use_kernarg_segment_ptr(i32) #1 {
  call void @use_kernarg_segment_ptr()
  ret void
}
| 79 | |
; Callee consuming the 64-bit dispatch ID; inline asm pins it to an SGPR
; pair, which the CHECK expects to be s[6:7].
; GCN-LABEL: {{^}}use_dispatch_id:
; GCN: ; use s[6:7]
define void @use_dispatch_id() #1 {
  %id = call i64 @llvm.amdgcn.dispatch.id()
  call void asm sideeffect "; use $0", "s"(i64 %id)
  ret void
}
| 87 | |
; No kernarg segment so that there is a mov to check. With kernarg
; pointer enabled, it happens to end up in the right place anyway.

; Kernel caller: dispatch-id input is enabled and forwarded into s[6:7].
; GCN-LABEL: {{^}}kern_indirect_use_dispatch_id:
; GCN: enable_sgpr_dispatch_id = 1

; GCN: s_mov_b64 s[6:7], s[4:5]
define amdgpu_kernel void @kern_indirect_use_dispatch_id() #1 {
  call void @use_dispatch_id()
  ret void
}
| 99 | |
; Callee using workgroup ID x: the first workgroup-ID SGPR argument is s6.
; GCN-LABEL: {{^}}use_workgroup_id_x:
; GCN: s_waitcnt
; GCN: ; use s6
define void @use_workgroup_id_x() #1 {
  %val = call i32 @llvm.amdgcn.workgroup.id.x()
  call void asm sideeffect "; use $0", "s"(i32 %val)
  ret void
}
| 108 | |
; Same as use_workgroup_id_x but with a stack object, checking that the
; workgroup ID in s6 coexists with frame setup (s5 = frame pointer copy of
; the incoming stack pointer s32, per the CHECKs).
; GCN-LABEL: {{^}}use_stack_workgroup_id_x:
; GCN: s_waitcnt
; GCN: s_mov_b32 s5, s32
; GCN: buffer_store_dword v0, off, s[0:3], s5 offset:4
; GCN: ; use s6
; GCN: s_setpc_b64
define void @use_stack_workgroup_id_x() #1 {
  %alloca = alloca i32, addrspace(5)
  store volatile i32 0, i32 addrspace(5)* %alloca
  %val = call i32 @llvm.amdgcn.workgroup.id.x()
  call void asm sideeffect "; use $0", "s"(i32 %val)
  ret void
}
| 122 | |
; Callee using only workgroup ID y; as the sole ID argument it still lands
; in the first ID slot, s6.
; GCN-LABEL: {{^}}use_workgroup_id_y:
; GCN: s_waitcnt
; GCN: ; use s6
define void @use_workgroup_id_y() #1 {
  %val = call i32 @llvm.amdgcn.workgroup.id.y()
  call void asm sideeffect "; use $0", "s"(i32 %val)
  ret void
}
| 131 | |
; Callee using only workgroup ID z; likewise packed into s6.
; GCN-LABEL: {{^}}use_workgroup_id_z:
; GCN: s_waitcnt
; GCN: ; use s6
define void @use_workgroup_id_z() #1 {
  %val = call i32 @llvm.amdgcn.workgroup.id.z()
  call void asm sideeffect "; use $0", "s"(i32 %val)
  ret void
}
| 140 | |
; Callee using workgroup IDs x and y: packed into consecutive SGPRs s6, s7.
; GCN-LABEL: {{^}}use_workgroup_id_xy:
; GCN: ; use s6
; GCN: ; use s7
define void @use_workgroup_id_xy() #1 {
  %val0 = call i32 @llvm.amdgcn.workgroup.id.x()
  %val1 = call i32 @llvm.amdgcn.workgroup.id.y()
  call void asm sideeffect "; use $0", "s"(i32 %val0)
  call void asm sideeffect "; use $0", "s"(i32 %val1)
  ret void
}
| 151 | |
; Callee using all three workgroup IDs: packed into s6, s7, s8.
; GCN-LABEL: {{^}}use_workgroup_id_xyz:
; GCN: ; use s6
; GCN: ; use s7
; GCN: ; use s8
define void @use_workgroup_id_xyz() #1 {
  %val0 = call i32 @llvm.amdgcn.workgroup.id.x()
  %val1 = call i32 @llvm.amdgcn.workgroup.id.y()
  %val2 = call i32 @llvm.amdgcn.workgroup.id.z()
  call void asm sideeffect "; use $0", "s"(i32 %val0)
  call void asm sideeffect "; use $0", "s"(i32 %val1)
  call void asm sideeffect "; use $0", "s"(i32 %val2)
  ret void
}
| 165 | |
; Callee using workgroup IDs x and z: y is unused, so x and z are packed
; contiguously into s6, s7.
; GCN-LABEL: {{^}}use_workgroup_id_xz:
; GCN: ; use s6
; GCN: ; use s7
define void @use_workgroup_id_xz() #1 {
  %val0 = call i32 @llvm.amdgcn.workgroup.id.x()
  %val1 = call i32 @llvm.amdgcn.workgroup.id.z()
  call void asm sideeffect "; use $0", "s"(i32 %val0)
  call void asm sideeffect "; use $0", "s"(i32 %val1)
  ret void
}
| 176 | |
; Callee using workgroup IDs y and z: again packed into s6, s7.
; GCN-LABEL: {{^}}use_workgroup_id_yz:
; GCN: ; use s6
; GCN: ; use s7
define void @use_workgroup_id_yz() #1 {
  %val0 = call i32 @llvm.amdgcn.workgroup.id.y()
  %val1 = call i32 @llvm.amdgcn.workgroup.id.z()
  call void asm sideeffect "; use $0", "s"(i32 %val0)
  call void asm sideeffect "; use $0", "s"(i32 %val1)
  ret void
}
| 187 | |
; Kernel caller: only workgroup ID x is enabled. The kernel's ID x already
; arrives where the callee expects it (s6), so the CHECK-NOTs verify no
; extra copy of s6 is emitted — only scratch/stack setup touches registers.
; GCN-LABEL: {{^}}kern_indirect_use_workgroup_id_x:
; GCN: enable_sgpr_workgroup_id_x = 1
; GCN: enable_sgpr_workgroup_id_y = 0
; GCN: enable_sgpr_workgroup_id_z = 0

; GCN-NOT: s6
; GCN: s_mov_b32 s33, s7
; GCN-NOT: s6
; GCN: s_mov_b32 s4, s33
; GCN-NOT: s6
; GCN: s_mov_b32 s32, s33
; GCN: s_swappc_b64
define amdgpu_kernel void @kern_indirect_use_workgroup_id_x() #1 {
  call void @use_workgroup_id_x()
  ret void
}
| 204 | |
; Kernel caller: x and y IDs are enabled (x is always on), so the kernel's
; ID y (s7) must be shifted down into the callee's first ID slot s6.
; GCN-LABEL: {{^}}kern_indirect_use_workgroup_id_y:
; GCN: enable_sgpr_workgroup_id_x = 1
; GCN: enable_sgpr_workgroup_id_y = 1
; GCN: enable_sgpr_workgroup_id_z = 0

; GCN: s_mov_b32 s33, s8
; GCN: s_mov_b32 s4, s33
; GCN: s_mov_b32 s6, s7
; GCN: s_mov_b32 s32, s33
; GCN: s_swappc_b64
define amdgpu_kernel void @kern_indirect_use_workgroup_id_y() #1 {
  call void @use_workgroup_id_y()
  ret void
}
| 219 | |
; Kernel caller: x and z IDs are enabled; the kernel's ID z (s7 here, since
; y is disabled) is copied into the callee's s6.
; GCN-LABEL: {{^}}kern_indirect_use_workgroup_id_z:
; GCN: enable_sgpr_workgroup_id_x = 1
; GCN: enable_sgpr_workgroup_id_y = 0
; GCN: enable_sgpr_workgroup_id_z = 1

; GCN: s_mov_b32 s33, s8
; GCN: s_mov_b32 s4, s33
; GCN: s_mov_b32 s6, s7
; GCN: s_swappc_b64
define amdgpu_kernel void @kern_indirect_use_workgroup_id_z() #1 {
  call void @use_workgroup_id_z()
  ret void
}
| 233 | |
; Kernel caller: x and y IDs enabled; both already sit in the callee's
; expected s6/s7, so the CHECK-NOTs verify no copies of s6/s7 appear among
; the stack-setup moves.
; GCN-LABEL: {{^}}kern_indirect_use_workgroup_id_xy:
; GCN: enable_sgpr_workgroup_id_x = 1
; GCN: enable_sgpr_workgroup_id_y = 1
; GCN: enable_sgpr_workgroup_id_z = 0

; GCN: s_mov_b32 s33, s8
; GCN-NOT: s6
; GCN-NOT: s7
; GCN: s_mov_b32 s4, s33
; GCN-NOT: s6
; GCN-NOT: s7
; GCN: s_mov_b32 s32, s33
; GCN-NOT: s6
; GCN-NOT: s7
; GCN: s_swappc_b64
define amdgpu_kernel void @kern_indirect_use_workgroup_id_xy() #1 {
  call void @use_workgroup_id_xy()
  ret void
}
| 253 | |
; Kernel caller: all three IDs enabled and already in the callee's expected
; s6/s7/s8; CHECK-NOTs verify none of them are disturbed by frame setup.
; GCN-LABEL: {{^}}kern_indirect_use_workgroup_id_xyz:
; GCN: enable_sgpr_workgroup_id_x = 1
; GCN: enable_sgpr_workgroup_id_y = 1
; GCN: enable_sgpr_workgroup_id_z = 1

; GCN: s_mov_b32 s33, s9

; GCN-NOT: s6
; GCN-NOT: s7
; GCN-NOT: s8

; GCN: s_mov_b32 s4, s33

; GCN-NOT: s6
; GCN-NOT: s7
; GCN-NOT: s8

; GCN: s_mov_b32 s32, s33

; GCN-NOT: s6
; GCN-NOT: s7
; GCN-NOT: s8

; GCN: s_swappc_b64
define amdgpu_kernel void @kern_indirect_use_workgroup_id_xyz() #1 {
  call void @use_workgroup_id_xyz()
  ret void
}
| 282 | |
; Kernel caller: x and z IDs enabled (y off). x is already in s6 and z
; occupies s7 in the kernel, matching the callee's packed x,z layout, so
; no s6/s7 copies should appear.
; GCN-LABEL: {{^}}kern_indirect_use_workgroup_id_xz:
; GCN: enable_sgpr_workgroup_id_x = 1
; GCN: enable_sgpr_workgroup_id_y = 0
; GCN: enable_sgpr_workgroup_id_z = 1

; GCN: s_mov_b32 s33, s8
; GCN-NOT: s6
; GCN-NOT: s7

; GCN: s_mov_b32 s4, s33
; GCN-NOT: s6
; GCN-NOT: s7

; GCN: s_mov_b32 s32, s33
; GCN-NOT: s6
; GCN-NOT: s7

; GCN: s_swappc_b64
define amdgpu_kernel void @kern_indirect_use_workgroup_id_xz() #1 {
  call void @use_workgroup_id_xz()
  ret void
}
| 305 | |
; Kernel caller: all IDs enabled but callee uses y,z only, so the kernel's
; y (s7) and z (s8) must shift down into the callee's s6, s7.
; GCN-LABEL: {{^}}kern_indirect_use_workgroup_id_yz:
; GCN: enable_sgpr_workgroup_id_x = 1
; GCN: enable_sgpr_workgroup_id_y = 1
; GCN: enable_sgpr_workgroup_id_z = 1

; GCN: s_mov_b32 s33, s9
; GCN: s_mov_b32 s6, s7
; GCN: s_mov_b32 s4, s33
; GCN: s_mov_b32 s7, s8
; GCN: s_mov_b32 s32, s33
; GCN: s_swappc_b64
define amdgpu_kernel void @kern_indirect_use_workgroup_id_yz() #1 {
  call void @use_workgroup_id_yz()
  ret void
}
| 321 | |
; Argument is in right place already
; Function-to-function call: the ID x argument is forwarded in place, so
; no mention of s6 should appear in the caller body.
; GCN-LABEL: {{^}}func_indirect_use_workgroup_id_x:
; GCN-NOT: s6
define void @func_indirect_use_workgroup_id_x() #1 {
  call void @use_workgroup_id_x()
  ret void
}
| 329 | |
; Function-to-function call forwarding ID y in place (no s6 copy expected).
; GCN-LABEL: {{^}}func_indirect_use_workgroup_id_y:
; GCN-NOT: s6
define void @func_indirect_use_workgroup_id_y() #1 {
  call void @use_workgroup_id_y()
  ret void
}
| 336 | |
; Function-to-function call forwarding ID z in place (no s6 copy expected).
; GCN-LABEL: {{^}}func_indirect_use_workgroup_id_z:
; GCN-NOT: s6
define void @func_indirect_use_workgroup_id_z() #1 {
  call void @use_workgroup_id_z()
  ret void
}
| 343 | |
; Callee mixing a regular VGPR argument (v0, stored to memory) with the
; workgroup ID x SGPR input (s6) — checks both are assigned correctly.
; GCN-LABEL: {{^}}other_arg_use_workgroup_id_x:
; GCN: {{flat|global}}_store_dword v{{\[[0-9]+:[0-9]+\]}}, v0
; GCN: ; use s6
define void @other_arg_use_workgroup_id_x(i32 %arg0) #1 {
  %val = call i32 @llvm.amdgcn.workgroup.id.x()
  store volatile i32 %arg0, i32 addrspace(1)* undef
  call void asm sideeffect "; use $0", "s"(i32 %val)
  ret void
}
| 353 | |
; Same as above with workgroup ID y: VGPR arg in v0, ID in s6.
; GCN-LABEL: {{^}}other_arg_use_workgroup_id_y:
; GCN: {{flat|global}}_store_dword v{{\[[0-9]+:[0-9]+\]}}, v0
; GCN: ; use s6
define void @other_arg_use_workgroup_id_y(i32 %arg0) #1 {
  %val = call i32 @llvm.amdgcn.workgroup.id.y()
  store volatile i32 %arg0, i32 addrspace(1)* undef
  call void asm sideeffect "; use $0", "s"(i32 %val)
  ret void
}
| 363 | |
; Same as above with workgroup ID z: VGPR arg in v0, ID in s6.
; GCN-LABEL: {{^}}other_arg_use_workgroup_id_z:
; GCN: {{flat|global}}_store_dword v{{\[[0-9]+:[0-9]+\]}}, v0
; GCN: ; use s6
define void @other_arg_use_workgroup_id_z(i32 %arg0) #1 {
  %val = call i32 @llvm.amdgcn.workgroup.id.z()
  store volatile i32 %arg0, i32 addrspace(1)* undef
  call void asm sideeffect "; use $0", "s"(i32 %val)
  ret void
}
| 373 | |
; Kernel caller: materializes the i32 argument 555 (0x22b) into v0 while
; the ID x stays untouched in s6 (CHECK-NOT guards around frame setup).
; GCN-LABEL: {{^}}kern_indirect_other_arg_use_workgroup_id_x:
; GCN: enable_sgpr_workgroup_id_x = 1
; GCN: enable_sgpr_workgroup_id_y = 0
; GCN: enable_sgpr_workgroup_id_z = 0

; GCN-DAG: s_mov_b32 s33, s7
; GCN-DAG: v_mov_b32_e32 v0, 0x22b

; GCN-NOT: s6
; GCN: s_mov_b32 s4, s33
; GCN-NOT: s6
; GCN-DAG: s_mov_b32 s32, s33
; GCN: s_swappc_b64
define amdgpu_kernel void @kern_indirect_other_arg_use_workgroup_id_x() #1 {
  call void @other_arg_use_workgroup_id_x(i32 555)
  ret void
}
| 391 | |
; Kernel caller: argument 555 goes to v0; the kernel's ID y (s7) shifts
; down into the callee's s6.
; GCN-LABEL: {{^}}kern_indirect_other_arg_use_workgroup_id_y:
; GCN: enable_sgpr_workgroup_id_x = 1
; GCN: enable_sgpr_workgroup_id_y = 1
; GCN: enable_sgpr_workgroup_id_z = 0

; GCN-DAG: s_mov_b32 s33, s8
; GCN-DAG: v_mov_b32_e32 v0, 0x22b
; GCN: s_mov_b32 s4, s33
; GCN-DAG: s_mov_b32 s6, s7
; GCN-DAG: s_mov_b32 s32, s33
; GCN: s_swappc_b64
define amdgpu_kernel void @kern_indirect_other_arg_use_workgroup_id_y() #1 {
  call void @other_arg_use_workgroup_id_y(i32 555)
  ret void
}
| 407 | |
; Kernel caller: argument 555 goes to v0; the kernel's ID z (s7, y is
; disabled) shifts down into the callee's s6.
; GCN-LABEL: {{^}}kern_indirect_other_arg_use_workgroup_id_z:
; GCN: enable_sgpr_workgroup_id_x = 1
; GCN: enable_sgpr_workgroup_id_y = 0
; GCN: enable_sgpr_workgroup_id_z = 1

; GCN: s_mov_b32 s33, s8
; GCN-DAG: v_mov_b32_e32 v0, 0x22b
; GCN: s_mov_b32 s4, s33
; GCN-DAG: s_mov_b32 s6, s7

; GCN: s_mov_b32 s32, s33
; GCN: s_swappc_b64
define amdgpu_kernel void @kern_indirect_other_arg_use_workgroup_id_z() #1 {
  call void @other_arg_use_workgroup_id_z(i32 555)
  ret void
}
| 424 | |
; Callee consuming every special SGPR input at once. Per the CHECKs, the
; incoming layout is: dispatch ptr s[6:7], queue ptr s[8:9], kernarg
; segment ptr s[10:11], dispatch id s[12:13], workgroup IDs x/y/z in
; s14/s15/s16; a stack store exercises the frame registers too.
; GCN-LABEL: {{^}}use_every_sgpr_input:
; GCN: buffer_store_dword v{{[0-9]+}}, off, s[0:3], s5 offset:4
; GCN: s_load_dword s{{[0-9]+}}, s[6:7], 0x0
; GCN: s_load_dword s{{[0-9]+}}, s[8:9], 0x0
; GCN: s_load_dword s{{[0-9]+}}, s[10:11], 0x0
; GCN: ; use s[12:13]
; GCN: ; use s14
; GCN: ; use s15
; GCN: ; use s16
define void @use_every_sgpr_input() #1 {
  %alloca = alloca i32, align 4, addrspace(5)
  store volatile i32 0, i32 addrspace(5)* %alloca

  %dispatch_ptr = call noalias i8 addrspace(4)* @llvm.amdgcn.dispatch.ptr() #0
  %dispatch_ptr.bc = bitcast i8 addrspace(4)* %dispatch_ptr to i32 addrspace(4)*
  %val0 = load volatile i32, i32 addrspace(4)* %dispatch_ptr.bc

  %queue_ptr = call noalias i8 addrspace(4)* @llvm.amdgcn.queue.ptr() #0
  %queue_ptr.bc = bitcast i8 addrspace(4)* %queue_ptr to i32 addrspace(4)*
  %val1 = load volatile i32, i32 addrspace(4)* %queue_ptr.bc

  %kernarg_segment_ptr = call noalias i8 addrspace(4)* @llvm.amdgcn.kernarg.segment.ptr() #0
  %kernarg_segment_ptr.bc = bitcast i8 addrspace(4)* %kernarg_segment_ptr to i32 addrspace(4)*
  %val2 = load volatile i32, i32 addrspace(4)* %kernarg_segment_ptr.bc

  %val3 = call i64 @llvm.amdgcn.dispatch.id()
  call void asm sideeffect "; use $0", "s"(i64 %val3)

  %val4 = call i32 @llvm.amdgcn.workgroup.id.x()
  call void asm sideeffect "; use $0", "s"(i32 %val4)

  %val5 = call i32 @llvm.amdgcn.workgroup.id.y()
  call void asm sideeffect "; use $0", "s"(i32 %val5)

  %val6 = call i32 @llvm.amdgcn.workgroup.id.z()
  call void asm sideeffect "; use $0", "s"(i32 %val6)

  ret void
}
| 464 | |
; Kernel caller of @use_every_sgpr_input: every special input must be
; enabled in the kernel header, and each pointer pair is shifted up by one
; register pair (the checked cascade of s_mov_b64 copies) to make room for
; the callee's argument layout.
; GCN-LABEL: {{^}}kern_indirect_use_every_sgpr_input:
; GCN: enable_sgpr_workgroup_id_x = 1
; GCN: enable_sgpr_workgroup_id_y = 1
; GCN: enable_sgpr_workgroup_id_z = 1
; GCN: enable_sgpr_workgroup_info = 0

; GCN: enable_sgpr_private_segment_buffer = 1
; GCN: enable_sgpr_dispatch_ptr = 1
; GCN: enable_sgpr_queue_ptr = 1
; GCN: enable_sgpr_kernarg_segment_ptr = 1
; GCN: enable_sgpr_dispatch_id = 1
; GCN: enable_sgpr_flat_scratch_init = 1

; GCN: s_mov_b32 s33, s17
; GCN: s_mov_b64 s[12:13], s[10:11]
; GCN: s_mov_b64 s[10:11], s[8:9]
; GCN: s_mov_b64 s[8:9], s[6:7]
; GCN: s_mov_b64 s[6:7], s[4:5]
; GCN: s_mov_b32 s4, s33
; GCN: s_mov_b32 s32, s33
; GCN: s_swappc_b64
define amdgpu_kernel void @kern_indirect_use_every_sgpr_input() #1 {
  call void @use_every_sgpr_input()
  ret void
}
| 490 | |
; Function-to-function call: all special inputs already sit in the callee's
; expected registers, so no copies of s6..s13 (scalar or pair) may appear.
; GCN-LABEL: {{^}}func_indirect_use_every_sgpr_input:
; GCN-NOT: s6
; GCN-NOT: s7
; GCN-NOT: s8
; GCN-NOT: s9
; GCN-NOT: s10
; GCN-NOT: s11
; GCN-NOT: s12
; GCN-NOT: s13
; GCN-NOT: s[6:7]
; GCN-NOT: s[8:9]
; GCN-NOT: s[10:11]
; GCN-NOT: s[12:13]
define void @func_indirect_use_every_sgpr_input() #1 {
  call void @use_every_sgpr_input()
  ret void
}
| 508 | |
; Function that uses every SGPR input itself and then calls a callee that
; only needs the workgroup IDs: the IDs must be repacked from the incoming
; s14/s15/s16 down into the callee's s6/s7/s8 before the call.
; GCN-LABEL: {{^}}func_use_every_sgpr_input_call_use_workgroup_id_xyz:
; GCN-DAG: s_mov_b32 s6, s14
; GCN-DAG: s_mov_b32 s7, s15
; GCN-DAG: s_mov_b32 s8, s16
; GCN: s_swappc_b64
define void @func_use_every_sgpr_input_call_use_workgroup_id_xyz() #1 {
  %alloca = alloca i32, align 4, addrspace(5)
  store volatile i32 0, i32 addrspace(5)* %alloca

  %dispatch_ptr = call noalias i8 addrspace(4)* @llvm.amdgcn.dispatch.ptr() #0
  %dispatch_ptr.bc = bitcast i8 addrspace(4)* %dispatch_ptr to i32 addrspace(4)*
  %val0 = load volatile i32, i32 addrspace(4)* %dispatch_ptr.bc

  %queue_ptr = call noalias i8 addrspace(4)* @llvm.amdgcn.queue.ptr() #0
  %queue_ptr.bc = bitcast i8 addrspace(4)* %queue_ptr to i32 addrspace(4)*
  %val1 = load volatile i32, i32 addrspace(4)* %queue_ptr.bc

  %kernarg_segment_ptr = call noalias i8 addrspace(4)* @llvm.amdgcn.kernarg.segment.ptr() #0
  %kernarg_segment_ptr.bc = bitcast i8 addrspace(4)* %kernarg_segment_ptr to i32 addrspace(4)*
  %val2 = load volatile i32, i32 addrspace(4)* %kernarg_segment_ptr.bc

  %val3 = call i64 @llvm.amdgcn.dispatch.id()
  call void asm sideeffect "; use $0", "s"(i64 %val3)

  %val4 = call i32 @llvm.amdgcn.workgroup.id.x()
  call void asm sideeffect "; use $0", "s"(i32 %val4)

  %val5 = call i32 @llvm.amdgcn.workgroup.id.y()
  call void asm sideeffect "; use $0", "s"(i32 %val5)

  %val6 = call i32 @llvm.amdgcn.workgroup.id.z()
  call void asm sideeffect "; use $0", "s"(i32 %val6)

  call void @use_workgroup_id_xyz()
  ret void
}
| 545 | |
; Variant where the call happens FIRST and the special inputs are consumed
; afterwards: all of them are live across the call, so the IDs (s14-s16)
; and pointer pairs (s[6:7]..s[10:11]) must be saved to callee-saved SGPRs
; before s_swappc_b64 and re-used after it, alongside stack frame setup.
; GCN-LABEL: {{^}}func_use_every_sgpr_input_call_use_workgroup_id_xyz_spill:
; GCN: s_mov_b32 s5, s32
; GCN: s_add_u32 s32, s32, 0x300

; GCN-DAG: s_mov_b32 [[SAVE_X:s[0-9]+]], s14
; GCN-DAG: s_mov_b32 [[SAVE_Y:s[0-9]+]], s15
; GCN-DAG: s_mov_b32 [[SAVE_Z:s[0-9]+]], s16
; GCN-DAG: s_mov_b64 {{s\[[0-9]+:[0-9]+\]}}, s[6:7]
; GCN-DAG: s_mov_b64 {{s\[[0-9]+:[0-9]+\]}}, s[8:9]
; GCN-DAG: s_mov_b64 {{s\[[0-9]+:[0-9]+\]}}, s[10:11]

; GCN-DAG: s_mov_b32 s6, [[SAVE_X]]
; GCN-DAG: s_mov_b32 s7, [[SAVE_Y]]
; GCN-DAG: s_mov_b32 s8, [[SAVE_Z]]
; GCN: s_swappc_b64

; GCN: buffer_store_dword v{{[0-9]+}}, off, s[0:3], s5 offset:4
; GCN: s_load_dword s{{[0-9]+}},
; GCN: s_load_dword s{{[0-9]+}},
; GCN: s_load_dword s{{[0-9]+}},
; GCN: ; use
; GCN: ; use [[SAVE_X]]
; GCN: ; use [[SAVE_Y]]
; GCN: ; use [[SAVE_Z]]
define void @func_use_every_sgpr_input_call_use_workgroup_id_xyz_spill() #1 {
  %alloca = alloca i32, align 4, addrspace(5)
  call void @use_workgroup_id_xyz()

  store volatile i32 0, i32 addrspace(5)* %alloca

  %dispatch_ptr = call noalias i8 addrspace(4)* @llvm.amdgcn.dispatch.ptr() #0
  %dispatch_ptr.bc = bitcast i8 addrspace(4)* %dispatch_ptr to i32 addrspace(4)*
  %val0 = load volatile i32, i32 addrspace(4)* %dispatch_ptr.bc

  %queue_ptr = call noalias i8 addrspace(4)* @llvm.amdgcn.queue.ptr() #0
  %queue_ptr.bc = bitcast i8 addrspace(4)* %queue_ptr to i32 addrspace(4)*
  %val1 = load volatile i32, i32 addrspace(4)* %queue_ptr.bc

  %kernarg_segment_ptr = call noalias i8 addrspace(4)* @llvm.amdgcn.kernarg.segment.ptr() #0
  %kernarg_segment_ptr.bc = bitcast i8 addrspace(4)* %kernarg_segment_ptr to i32 addrspace(4)*
  %val2 = load volatile i32, i32 addrspace(4)* %kernarg_segment_ptr.bc

  %val3 = call i64 @llvm.amdgcn.dispatch.id()
  call void asm sideeffect "; use $0", "s"(i64 %val3)

  %val4 = call i32 @llvm.amdgcn.workgroup.id.x()
  call void asm sideeffect "; use $0", "s"(i32 %val4)

  %val5 = call i32 @llvm.amdgcn.workgroup.id.y()
  call void asm sideeffect "; use $0", "s"(i32 %val5)

  %val6 = call i32 @llvm.amdgcn.workgroup.id.z()
  call void asm sideeffect "; use $0", "s"(i32 %val6)

  ret void
}
| 602 | |
; AMDGPU intrinsic declarations used throughout this test.
; #0 marks intrinsics readnone/speculatable; #1 keeps functions noinline so
; the calling-convention lowering under test actually happens.
declare i32 @llvm.amdgcn.workgroup.id.x() #0
declare i32 @llvm.amdgcn.workgroup.id.y() #0
declare i32 @llvm.amdgcn.workgroup.id.z() #0
declare noalias i8 addrspace(4)* @llvm.amdgcn.queue.ptr() #0
declare noalias i8 addrspace(4)* @llvm.amdgcn.kernarg.segment.ptr() #0
declare i64 @llvm.amdgcn.dispatch.id() #0
declare noalias i8 addrspace(4)* @llvm.amdgcn.dispatch.ptr() #0

attributes #0 = { nounwind readnone speculatable }
attributes #1 = { nounwind noinline }