; RUN: llc -march=amdgcn -mtriple=amdgcn-amd-amdhsa -mattr=-promote-alloca -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefix=HSA -check-prefix=CI %s
; RUN: llc -march=amdgcn -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -mattr=-promote-alloca -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefix=HSA -check-prefix=GFX9 %s
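
; Casting a group or private pointer to flat inserts the 32-bit aperture
; base as the high half of the 64-bit pointer, with a compare-and-select
; so the source null sentinel maps to the flat null pointer. CI must read
; the aperture bases out of the queue pointer (hence enable_sgpr_queue_ptr
; = 1 below); GFX9 reads them from a hardware register via s_getreg_b32,
; so it does not need the queue pointer.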

; HSA-LABEL: {{^}}use_group_to_flat_addrspacecast:
; HSA: enable_sgpr_private_segment_buffer = 1
; HSA: enable_sgpr_dispatch_ptr = 0
; CI: enable_sgpr_queue_ptr = 1
; GFX9: enable_sgpr_queue_ptr = 0

; CI-DAG: s_load_dword [[PTR:s[0-9]+]], s[6:7], 0x0{{$}}
; CI-DAG: s_load_dword [[APERTURE:s[0-9]+]], s[4:5], 0x10{{$}}
; CI-DAG: v_mov_b32_e32 [[VAPERTURE:v[0-9]+]], [[APERTURE]]

; GFX9-DAG: s_load_dword [[PTR:s[0-9]+]], s[4:5], 0x0{{$}}
; GFX9-DAG: s_getreg_b32 [[SSRC_SHARED:s[0-9]+]], hwreg(15, 16, 16)
; GFX9-DAG: s_lshl_b32 [[SSRC_SHARED_BASE:s[0-9]+]], [[SSRC_SHARED]], 16
; GFX9-DAG: v_mov_b32_e32 [[VAPERTURE:v[0-9]+]], [[SSRC_SHARED_BASE]]

; GFX9-XXX: v_mov_b32_e32 [[VAPERTURE:v[0-9]+]], src_shared_base

; HSA-DAG: v_mov_b32_e32 [[VPTR:v[0-9]+]], [[PTR]]

; HSA-DAG: v_cmp_ne_u32_e64 vcc, [[PTR]], -1
; HSA-DAG: v_cndmask_b32_e32 v[[HI:[0-9]+]], 0, [[VAPERTURE]]
; HSA-DAG: v_cndmask_b32_e32 v[[LO:[0-9]+]], 0, [[VPTR]]
; HSA-DAG: v_mov_b32_e32 [[K:v[0-9]+]], 7

; HSA: flat_store_dword v{{\[}}[[LO]]:[[HI]]{{\]}}, [[K]]

; NumSgprs should be at most 2 digits. Make sure src_shared_base is not
; counted as a high-numbered SGPR.

; CI: NumSgprs: {{[0-9][0-9]+}}
; GFX9: NumSgprs: {{[0-9]+}}
define amdgpu_kernel void @use_group_to_flat_addrspacecast(i32 addrspace(3)* %ptr) #0 {
  %stof = addrspacecast i32 addrspace(3)* %ptr to i32 addrspace(4)*
  store volatile i32 7, i32 addrspace(4)* %stof
  ret void
}

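; Private-to-flat follows the same pattern with the private aperture: CI
; loads it from the queue pointer at offset 0x11, and GFX9 reads the low
; half of hwreg 15 (src_private_base). Note the compare here is against 0
; rather than -1.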
; HSA-LABEL: {{^}}use_private_to_flat_addrspacecast:
; HSA: enable_sgpr_private_segment_buffer = 1
; HSA: enable_sgpr_dispatch_ptr = 0
; CI: enable_sgpr_queue_ptr = 1
; GFX9: enable_sgpr_queue_ptr = 0

; CI-DAG: s_load_dword [[PTR:s[0-9]+]], s[6:7], 0x0{{$}}
; CI-DAG: s_load_dword [[APERTURE:s[0-9]+]], s[4:5], 0x11{{$}}
; CI-DAG: v_mov_b32_e32 [[VAPERTURE:v[0-9]+]], [[APERTURE]]

; GFX9-DAG: s_load_dword [[PTR:s[0-9]+]], s[4:5], 0x0{{$}}
; GFX9-DAG: s_getreg_b32 [[SSRC_PRIVATE:s[0-9]+]], hwreg(15, 0, 16)
; GFX9-DAG: s_lshl_b32 [[SSRC_PRIVATE_BASE:s[0-9]+]], [[SSRC_PRIVATE]], 16
; GFX9-DAG: v_mov_b32_e32 [[VAPERTURE:v[0-9]+]], [[SSRC_PRIVATE_BASE]]

; GFX9-XXX: v_mov_b32_e32 [[VAPERTURE:v[0-9]+]], src_private_base

; HSA-DAG: v_mov_b32_e32 [[VPTR:v[0-9]+]], [[PTR]]

; HSA-DAG: v_cmp_ne_u32_e64 vcc, [[PTR]], 0
; HSA-DAG: v_cndmask_b32_e32 v[[HI:[0-9]+]], 0, [[VAPERTURE]]
; HSA-DAG: v_cndmask_b32_e32 v[[LO:[0-9]+]], 0, [[VPTR]]
; HSA-DAG: v_mov_b32_e32 [[K:v[0-9]+]], 7

; HSA: flat_store_dword v{{\[}}[[LO]]:[[HI]]{{\]}}, [[K]]

; CI: NumSgprs: {{[0-9][0-9]+}}
; GFX9: NumSgprs: {{[0-9]+}}
define amdgpu_kernel void @use_private_to_flat_addrspacecast(i32* %ptr) #0 {
  %stof = addrspacecast i32* %ptr to i32 addrspace(4)*
  store volatile i32 7, i32 addrspace(4)* %stof
  ret void
}

; No-op cast: global and flat pointers share the same 64-bit value, so no
; conversion code is emitted.
; HSA-LABEL: {{^}}use_global_to_flat_addrspacecast:
; HSA: enable_sgpr_queue_ptr = 0

; HSA: s_load_dwordx2 s{{\[}}[[PTRLO:[0-9]+]]:[[PTRHI:[0-9]+]]{{\]}}
; HSA-DAG: v_mov_b32_e32 v[[VPTRLO:[0-9]+]], s[[PTRLO]]
; HSA-DAG: v_mov_b32_e32 v[[VPTRHI:[0-9]+]], s[[PTRHI]]
; HSA-DAG: v_mov_b32_e32 [[K:v[0-9]+]], 7
; HSA: flat_store_dword v{{\[}}[[VPTRLO]]:[[VPTRHI]]{{\]}}, [[K]]
define amdgpu_kernel void @use_global_to_flat_addrspacecast(i32 addrspace(1)* %ptr) #0 {
  %stof = addrspacecast i32 addrspace(1)* %ptr to i32 addrspace(4)*
  store volatile i32 7, i32 addrspace(4)* %stof
  ret void
}

; No-op cast: constant and flat pointers also share the same 64-bit value.
; HSA-LABEL: {{^}}use_constant_to_flat_addrspacecast:
; HSA: s_load_dwordx2 s{{\[}}[[PTRLO:[0-9]+]]:[[PTRHI:[0-9]+]]{{\]}}
; HSA-DAG: v_mov_b32_e32 v[[VPTRLO:[0-9]+]], s[[PTRLO]]
; HSA-DAG: v_mov_b32_e32 v[[VPTRHI:[0-9]+]], s[[PTRHI]]
; HSA: flat_load_dword v{{[0-9]+}}, v{{\[}}[[VPTRLO]]:[[VPTRHI]]{{\]}}
define amdgpu_kernel void @use_constant_to_flat_addrspacecast(i32 addrspace(2)* %ptr) #0 {
  %stof = addrspacecast i32 addrspace(2)* %ptr to i32 addrspace(4)*
  %ld = load volatile i32, i32 addrspace(4)* %stof
  ret void
}

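; The reverse direction, flat-to-group, just keeps the low 32 bits of the
; pointer; a 64-bit compare against the flat null pointer selects -1 (the
; group null sentinel) when the input is null.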
; HSA-LABEL: {{^}}use_flat_to_group_addrspacecast:
; HSA: enable_sgpr_private_segment_buffer = 1
; HSA: enable_sgpr_dispatch_ptr = 0
; HSA: enable_sgpr_queue_ptr = 0

; HSA: s_load_dwordx2 s{{\[}}[[PTR_LO:[0-9]+]]:[[PTR_HI:[0-9]+]]{{\]}}
; HSA-DAG: v_cmp_ne_u64_e64 vcc, s{{\[}}[[PTR_LO]]:[[PTR_HI]]{{\]}}, 0{{$}}
; HSA-DAG: v_mov_b32_e32 v[[VPTR_LO:[0-9]+]], s[[PTR_LO]]
; HSA-DAG: v_cndmask_b32_e32 [[CASTPTR:v[0-9]+]], -1, v[[VPTR_LO]]
; HSA-DAG: v_mov_b32_e32 v[[K:[0-9]+]], 0{{$}}
; HSA: ds_write_b32 [[CASTPTR]], v[[K]]
define amdgpu_kernel void @use_flat_to_group_addrspacecast(i32 addrspace(4)* %ptr) #0 {
  %ftos = addrspacecast i32 addrspace(4)* %ptr to i32 addrspace(3)*
  store volatile i32 0, i32 addrspace(3)* %ftos
  ret void
}

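; Flat-to-private is the same truncating pattern, except null selects 0
; and the result is used as a scratch (buffer) address.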
; HSA-LABEL: {{^}}use_flat_to_private_addrspacecast:
; HSA: enable_sgpr_private_segment_buffer = 1
; HSA: enable_sgpr_dispatch_ptr = 0
; HSA: enable_sgpr_queue_ptr = 0

; HSA: s_load_dwordx2 s{{\[}}[[PTR_LO:[0-9]+]]:[[PTR_HI:[0-9]+]]{{\]}}
; HSA-DAG: v_cmp_ne_u64_e64 vcc, s{{\[}}[[PTR_LO]]:[[PTR_HI]]{{\]}}, 0{{$}}
; HSA-DAG: v_mov_b32_e32 v[[VPTR_LO:[0-9]+]], s[[PTR_LO]]
; HSA-DAG: v_cndmask_b32_e32 [[CASTPTR:v[0-9]+]], 0, v[[VPTR_LO]]
; HSA-DAG: v_mov_b32_e32 v[[K:[0-9]+]], 0{{$}}
; HSA: buffer_store_dword v[[K]], [[CASTPTR]], s{{\[[0-9]+:[0-9]+\]}}, s{{[0-9]+}} offen{{$}}
define amdgpu_kernel void @use_flat_to_private_addrspacecast(i32 addrspace(4)* %ptr) #0 {
  %ftos = addrspacecast i32 addrspace(4)* %ptr to i32*
  store volatile i32 0, i32* %ftos
  ret void
}

; HSA-LABEL: {{^}}use_flat_to_global_addrspacecast:
; HSA: enable_sgpr_queue_ptr = 0

; HSA: s_load_dwordx2 s{{\[}}[[PTRLO:[0-9]+]]:[[PTRHI:[0-9]+]]{{\]}}, s[4:5], 0x0
; HSA-DAG: v_mov_b32_e32 v[[VPTRLO:[0-9]+]], s[[PTRLO]]
; HSA-DAG: v_mov_b32_e32 v[[VPTRHI:[0-9]+]], s[[PTRHI]]
; HSA-DAG: v_mov_b32_e32 [[K:v[0-9]+]], 0
; HSA: flat_store_dword v{{\[}}[[VPTRLO]]:[[VPTRHI]]{{\]}}, [[K]]
define amdgpu_kernel void @use_flat_to_global_addrspacecast(i32 addrspace(4)* %ptr) #0 {
  %ftos = addrspacecast i32 addrspace(4)* %ptr to i32 addrspace(1)*
  store volatile i32 0, i32 addrspace(1)* %ftos
  ret void
}

; HSA-LABEL: {{^}}use_flat_to_constant_addrspacecast:
; HSA: enable_sgpr_queue_ptr = 0

; HSA: s_load_dwordx2 s{{\[}}[[PTRLO:[0-9]+]]:[[PTRHI:[0-9]+]]{{\]}}, s[4:5], 0x0
; HSA: s_load_dword s{{[0-9]+}}, s{{\[}}[[PTRLO]]:[[PTRHI]]{{\]}}, 0x0
define amdgpu_kernel void @use_flat_to_constant_addrspacecast(i32 addrspace(4)* %ptr) #0 {
  %ftos = addrspacecast i32 addrspace(4)* %ptr to i32 addrspace(2)*
  load volatile i32, i32 addrspace(2)* %ftos
  ret void
}

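; The following tests check constant folding of casts of null and -1.
; Casting group address 0 still inserts the shared aperture as the high
; half, while the -1 group sentinel folds directly to the flat null
; pointer (see cast_neg1_group_to_flat_addrspacecast).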
; HSA-LABEL: {{^}}cast_0_group_to_flat_addrspacecast:
; CI: s_load_dword [[APERTURE:s[0-9]+]], s[4:5], 0x10
; CI-DAG: v_mov_b32_e32 v[[HI:[0-9]+]], [[APERTURE]]
; GFX9-DAG: s_getreg_b32 [[SSRC_SHARED:s[0-9]+]], hwreg(15, 16, 16)
; GFX9-DAG: s_lshl_b32 [[SSRC_SHARED_BASE:s[0-9]+]], [[SSRC_SHARED]], 16
; GFX9-DAG: v_mov_b32_e32 v[[HI:[0-9]+]], [[SSRC_SHARED_BASE]]

; GFX9-XXX: v_mov_b32_e32 v[[HI:[0-9]+]], src_shared_base

; HSA-DAG: v_mov_b32_e32 v[[LO:[0-9]+]], 0{{$}}
; HSA-DAG: v_mov_b32_e32 v[[K:[0-9]+]], 7{{$}}
; HSA: flat_store_dword v{{\[}}[[LO]]:[[HI]]{{\]}}, v[[K]]
define amdgpu_kernel void @cast_0_group_to_flat_addrspacecast() #0 {
  %cast = addrspacecast i32 addrspace(3)* null to i32 addrspace(4)*
  store volatile i32 7, i32 addrspace(4)* %cast
  ret void
}

; HSA-LABEL: {{^}}cast_0_flat_to_group_addrspacecast:
; HSA-DAG: v_mov_b32_e32 [[PTR:v[0-9]+]], -1{{$}}
; HSA-DAG: v_mov_b32_e32 [[K:v[0-9]+]], 7{{$}}
; HSA: ds_write_b32 [[PTR]], [[K]]
define amdgpu_kernel void @cast_0_flat_to_group_addrspacecast() #0 {
  %cast = addrspacecast i32 addrspace(4)* null to i32 addrspace(3)*
  store volatile i32 7, i32 addrspace(3)* %cast
  ret void
}

; HSA-LABEL: {{^}}cast_neg1_group_to_flat_addrspacecast:
; HSA: v_mov_b32_e32 v[[LO:[0-9]+]], 0{{$}}
; HSA: v_mov_b32_e32 v[[K:[0-9]+]], 7{{$}}
; HSA: v_mov_b32_e32 v[[HI:[0-9]+]], 0{{$}}
; HSA: flat_store_dword v{{\[}}[[LO]]:[[HI]]{{\]}}, v[[K]]
define amdgpu_kernel void @cast_neg1_group_to_flat_addrspacecast() #0 {
  %cast = addrspacecast i32 addrspace(3)* inttoptr (i32 -1 to i32 addrspace(3)*) to i32 addrspace(4)*
  store volatile i32 7, i32 addrspace(4)* %cast
  ret void
}

; HSA-LABEL: {{^}}cast_neg1_flat_to_group_addrspacecast:
; HSA-DAG: v_mov_b32_e32 [[PTR:v[0-9]+]], -1{{$}}
; HSA-DAG: v_mov_b32_e32 [[K:v[0-9]+]], 7{{$}}
; HSA: ds_write_b32 [[PTR]], [[K]]
define amdgpu_kernel void @cast_neg1_flat_to_group_addrspacecast() #0 {
  %cast = addrspacecast i32 addrspace(4)* inttoptr (i64 -1 to i32 addrspace(4)*) to i32 addrspace(3)*
  store volatile i32 7, i32 addrspace(3)* %cast
  ret void
}

; FIXME: Shouldn't need to enable queue ptr.
; HSA-LABEL: {{^}}cast_0_private_to_flat_addrspacecast:
; CI: enable_sgpr_queue_ptr = 1
; GFX9: enable_sgpr_queue_ptr = 0

; HSA-DAG: v_mov_b32_e32 v[[LO:[0-9]+]], 0{{$}}
; HSA-DAG: v_mov_b32_e32 v[[K:[0-9]+]], 7{{$}}
; HSA: v_mov_b32_e32 v[[HI:[0-9]+]], 0{{$}}
; HSA: flat_store_dword v{{\[}}[[LO]]:[[HI]]{{\]}}, v[[K]]
define amdgpu_kernel void @cast_0_private_to_flat_addrspacecast() #0 {
  %cast = addrspacecast i32* null to i32 addrspace(4)*
  store volatile i32 7, i32 addrspace(4)* %cast
  ret void
}

; HSA-LABEL: {{^}}cast_0_flat_to_private_addrspacecast:
; HSA: v_mov_b32_e32 [[K:v[0-9]+]], 7{{$}}
; HSA: buffer_store_dword [[K]], off, s{{\[[0-9]+:[0-9]+\]}}, s{{[0-9]+$}}
define amdgpu_kernel void @cast_0_flat_to_private_addrspacecast() #0 {
  %cast = addrspacecast i32 addrspace(4)* null to i32 addrspace(0)*
  store volatile i32 7, i32* %cast
  ret void
}

; Disable optimizations in case there are optimizations added that
; specialize away generic pointer accesses.

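; The phi merges flat pointers that originated in different address
; spaces, so the store must remain a flat access.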
; HSA-LABEL: {{^}}branch_use_flat_i32:
; HSA: flat_store_dword {{v\[[0-9]+:[0-9]+\]}}, {{v[0-9]+}}
; HSA: s_endpgm
define amdgpu_kernel void @branch_use_flat_i32(i32 addrspace(1)* noalias %out, i32 addrspace(1)* %gptr, i32 addrspace(3)* %lptr, i32 %x, i32 %c) #0 {
entry:
  %cmp = icmp ne i32 %c, 0
  br i1 %cmp, label %local, label %global

local:
  %flat_local = addrspacecast i32 addrspace(3)* %lptr to i32 addrspace(4)*
  br label %end

global:
  %flat_global = addrspacecast i32 addrspace(1)* %gptr to i32 addrspace(4)*
  br label %end

end:
  %fptr = phi i32 addrspace(4)* [ %flat_local, %local ], [ %flat_global, %global ]
  store volatile i32 %x, i32 addrspace(4)* %fptr, align 4
;  %val = load i32, i32 addrspace(4)* %fptr, align 4
;  store i32 %val, i32 addrspace(1)* %out, align 4
  ret void
}

; Check for prologue initializing special SGPRs pointing to scratch.
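; Per the checks below, CI assembles flat_scratch from separate pieces
; (flat_scratch_lo is moved in directly, flat_scratch_hi is derived from
; an add shifted right by 8), while GFX9 materializes it with a single
; 64-bit add of the wave offset.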
; HSA-LABEL: {{^}}store_flat_scratch:
; CI-DAG: s_mov_b32 flat_scratch_lo, s9
; CI-DAG: s_add_u32 [[ADD:s[0-9]+]], s8, s11
; CI: s_lshr_b32 flat_scratch_hi, [[ADD]], 8

; GFX9: s_add_u32 flat_scratch_lo, s6, s9
; GFX9: s_addc_u32 flat_scratch_hi, s7, 0

; HSA: flat_store_dword
; HSA: s_barrier
; HSA: flat_load_dword
define amdgpu_kernel void @store_flat_scratch(i32 addrspace(1)* noalias %out, i32) #0 {
  %alloca = alloca i32, i32 9, align 4
  %x = call i32 @llvm.amdgcn.workitem.id.x() #2
  %pptr = getelementptr i32, i32* %alloca, i32 %x
  %fptr = addrspacecast i32* %pptr to i32 addrspace(4)*
  store volatile i32 %x, i32 addrspace(4)* %fptr
  ; Dummy call
  call void @llvm.amdgcn.s.barrier() #1
  %reload = load volatile i32, i32 addrspace(4)* %fptr, align 4
  store volatile i32 %reload, i32 addrspace(1)* %out, align 4
  ret void
}

declare void @llvm.amdgcn.s.barrier() #1
declare i32 @llvm.amdgcn.workitem.id.x() #2

attributes #0 = { nounwind }
attributes #1 = { nounwind convergent }
attributes #2 = { nounwind readnone }