; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=MOVREL %s
; RUN: llc -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=MOVREL %s
; RUN: llc -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -amdgpu-vgpr-index-mode -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=IDXMODE %s
; RUN: llc -march=amdgcn -mcpu=gfx900 -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=IDXMODE %s

; Tests for indirect addressing on SI, which is implemented using dynamic
; indexing of vectors.
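
; Two lowering strategies are checked here. A minimal sketch of each,
; assuming the source vector lives in v0-v3, the index in s0, and the
; result in v4 (illustrative only, not actual compiler output):
;
;   MOVREL (the SI/VI default):
;     s_mov_b32 m0, s0             ; index register
;     v_movrels_b32_e32 v4, v0     ; v4 = v[0 + m0]
;
;   IDXMODE (gfx9, or VI with -amdgpu-vgpr-index-mode):
;     s_set_gpr_idx_on s0, src0    ; enable VGPR indexing of src0
;     v_mov_b32_e32 v4, v0         ; v4 = v[0 + index]
;     s_set_gpr_idx_off            ; leave indexing mode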

; GCN-LABEL: {{^}}extract_w_offset:
; GCN-DAG: s_load_dword [[IN:s[0-9]+]]
; GCN-DAG: v_mov_b32_e32 v{{[0-9]+}}, 4.0
; GCN-DAG: v_mov_b32_e32 v{{[0-9]+}}, 0x40400000
; GCN-DAG: v_mov_b32_e32 [[BASEREG:v[0-9]+]], 2.0
; GCN-DAG: v_mov_b32_e32 v{{[0-9]+}}, 1.0

; MOVREL-DAG: s_mov_b32 m0, [[IN]]
; MOVREL: v_movrels_b32_e32 v{{[0-9]+}}, [[BASEREG]]

; IDXMODE: s_set_gpr_idx_on [[IN]], src0{{$}}
; IDXMODE-NEXT: v_mov_b32_e32 v{{[0-9]+}}, [[BASEREG]]
; IDXMODE-NEXT: s_set_gpr_idx_off
define amdgpu_kernel void @extract_w_offset(float addrspace(1)* %out, i32 %in) {
entry:
  %idx = add i32 %in, 1
  %elt = extractelement <4 x float> <float 1.0, float 2.0, float 3.0, float 4.0>, i32 %idx
  store float %elt, float addrspace(1)* %out
  ret void
}
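
; Note how the constant "+ 1" in the index is handled above (inferred
; from the CHECK lines rather than stated anywhere authoritative):
; instead of materializing %in + 1 in m0, the backend keeps m0 = %in and
; uses the register holding element 1 (the 2.0) as [[BASEREG]], so the
; hardware reads v[elt1 + %in].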

; XXX: Could do v_or_b32 directly
; GCN-LABEL: {{^}}extract_w_offset_salu_use_vector:
; MOVREL: s_mov_b32 m0
; GCN-DAG: s_or_b32
; GCN-DAG: s_or_b32
; GCN-DAG: s_or_b32
; GCN-DAG: s_or_b32
; GCN-DAG: v_mov_b32_e32 v{{[0-9]+}}, s{{[0-9]+}}
; GCN-DAG: v_mov_b32_e32 v{{[0-9]+}}, s{{[0-9]+}}
; GCN-DAG: v_mov_b32_e32 v{{[0-9]+}}, s{{[0-9]+}}
; GCN-DAG: v_mov_b32_e32 v{{[0-9]+}}, s{{[0-9]+}}

; MOVREL: v_movrels_b32_e32

; IDXMODE: s_set_gpr_idx_on s{{[0-9]+}}, src0{{$}}
; IDXMODE-NEXT: v_mov_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}
; IDXMODE-NEXT: s_set_gpr_idx_off
define amdgpu_kernel void @extract_w_offset_salu_use_vector(i32 addrspace(1)* %out, i32 %in, <4 x i32> %or.val) {
entry:
  %idx = add i32 %in, 1
  %vec = or <4 x i32> %or.val, <i32 1, i32 2, i32 3, i32 4>
  %elt = extractelement <4 x i32> %vec, i32 %idx
  store i32 %elt, i32 addrspace(1)* %out
  ret void
}

; GCN-LABEL: {{^}}extract_wo_offset:
; GCN-DAG: s_load_dword [[IN:s[0-9]+]]
; GCN-DAG: v_mov_b32_e32 v{{[0-9]+}}, 4.0
; GCN-DAG: v_mov_b32_e32 v{{[0-9]+}}, 0x40400000
; GCN-DAG: v_mov_b32_e32 v{{[0-9]+}}, 2.0
; GCN-DAG: v_mov_b32_e32 [[BASEREG:v[0-9]+]], 1.0

; MOVREL-DAG: s_mov_b32 m0, [[IN]]
; MOVREL: v_movrels_b32_e32 v{{[0-9]+}}, [[BASEREG]]

; IDXMODE: s_set_gpr_idx_on [[IN]], src0{{$}}
; IDXMODE-NEXT: v_mov_b32_e32 v{{[0-9]+}}, [[BASEREG]]
; IDXMODE-NEXT: s_set_gpr_idx_off
define amdgpu_kernel void @extract_wo_offset(float addrspace(1)* %out, i32 %in) {
entry:
  %elt = extractelement <4 x float> <float 1.0, float 2.0, float 3.0, float 4.0>, i32 %in
  store float %elt, float addrspace(1)* %out
  ret void
}

; GCN-LABEL: {{^}}extract_neg_offset_sgpr:
; The offset depends on the register that holds the first element of the vector.
; MOVREL: s_add_i32 m0, s{{[0-9]+}}, 0xfffffe{{[0-9a-z]+}}
; MOVREL: v_movrels_b32_e32 v{{[0-9]}}, v0

; IDXMODE: s_addk_i32 [[ADD_IDX:s[0-9]+]], 0xfe00{{$}}
; IDXMODE: v_mov_b32_e32 v2, 2
; IDXMODE: v_mov_b32_e32 v3, 3
; IDXMODE-NEXT: s_set_gpr_idx_on [[ADD_IDX]], src0{{$}}
; IDXMODE-NEXT: v_mov_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}
; IDXMODE-NEXT: s_set_gpr_idx_off
define amdgpu_kernel void @extract_neg_offset_sgpr(i32 addrspace(1)* %out, i32 %offset) {
entry:
  %index = add i32 %offset, -512
  %value = extractelement <4 x i32> <i32 0, i32 1, i32 2, i32 3>, i32 %index
  store i32 %value, i32 addrspace(1)* %out
  ret void
}
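
; A worked example of the offset arithmetic above (reasoning inferred
; from the CHECK lines): the IR index is %offset - 512, and the hardware
; adds the index register to the base register number, so the backend
; folds the -512 into the index itself: m0 = %offset + 0xfffffe00
; (0xfffffe00 is -512 as a 32-bit value). For a runtime %offset of 513
; the final index is 513 - 512 = 1, selecting element 1 of the vector.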

; GCN-LABEL: {{^}}extract_neg_offset_sgpr_loaded:
; The offset depends on the register that holds the first element of the vector.
; MOVREL: s_add_i32 m0, s{{[0-9]+}}, 0xfffffe{{[0-9a-z]+}}
; MOVREL: v_movrels_b32_e32 v{{[0-9]}}, v0

; IDXMODE: s_addk_i32 [[ADD_IDX:s[0-9]+]], 0xfe00{{$}}
; IDXMODE: v_mov_b32_e32 v0,
; IDXMODE: v_mov_b32_e32 v1,
; IDXMODE: v_mov_b32_e32 v2,
; IDXMODE: v_mov_b32_e32 v3,
; IDXMODE-NEXT: s_set_gpr_idx_on [[ADD_IDX]], src0{{$}}
; IDXMODE-NEXT: v_mov_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}
; IDXMODE-NEXT: s_set_gpr_idx_off
define amdgpu_kernel void @extract_neg_offset_sgpr_loaded(i32 addrspace(1)* %out, <4 x i32> %vec0, <4 x i32> %vec1, i32 %offset) {
entry:
  %index = add i32 %offset, -512
  %or = or <4 x i32> %vec0, %vec1
  %value = extractelement <4 x i32> %or, i32 %index
  store i32 %value, i32 addrspace(1)* %out
  ret void
}

; GCN-LABEL: {{^}}extract_neg_offset_vgpr:
; The offset depends on the register that holds the first element of the vector.

; FIXME: The waitcnt for the argument load can go after the loop
; IDXMODE: s_set_gpr_idx_on 0, src0
; GCN: s_mov_b64 s{{\[[0-9]+:[0-9]+\]}}, exec
; GCN: [[LOOPBB:BB[0-9]+_[0-9]+]]:
; GCN: v_readfirstlane_b32 [[READLANE:s[0-9]+]], v{{[0-9]+}}

; MOVREL: s_add_i32 m0, [[READLANE]], 0xfffffe0
; MOVREL: s_and_saveexec_b64 vcc, vcc
; MOVREL: v_movrels_b32_e32 [[RESULT:v[0-9]+]], v1

; IDXMODE: s_addk_i32 [[ADD_IDX:s[0-9]+]], 0xfe00
; IDXMODE: s_set_gpr_idx_idx [[ADD_IDX]]
; IDXMODE: s_and_saveexec_b64 vcc, vcc
; IDXMODE: v_mov_b32_e32 [[RESULT:v[0-9]+]], v1

; GCN: s_cbranch_execnz

; IDXMODE: s_set_gpr_idx_off
; GCN: buffer_store_dword [[RESULT]]
define amdgpu_kernel void @extract_neg_offset_vgpr(i32 addrspace(1)* %out) {
entry:
  %id = call i32 @llvm.amdgcn.workitem.id.x() #1
  %index = add i32 %id, -512
  %value = extractelement <4 x i32> <i32 0, i32 1, i32 2, i32 3>, i32 %index
  store i32 %value, i32 addrspace(1)* %out
  ret void
}
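
; When the index is divergent (lives in a VGPR), one m0 value cannot
; serve all lanes, so the compiler emits a "waterfall" loop like the one
; the CHECK lines above walk through. A hand-written sketch of its shape
; (an assumption from the patterns here, not verbatim output):
;
;   BB_loop:
;     v_readfirstlane_b32 s2, v1     ; grab the index from one active lane
;     v_cmp_eq_u32_e32 vcc, s2, v1   ; mask of all lanes sharing that index
;     s_mov_b32 m0, s2               ; (or s_set_gpr_idx_idx s2)
;     s_and_saveexec_b64 vcc, vcc    ; run only those lanes
;     v_movrels_b32_e32 v0, v2       ; indexed move for the matching lanes
;     s_xor_b64 exec, exec, vcc      ; retire the handled lanes
;     s_cbranch_execnz BB_loop       ; loop until every lane is handled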

; GCN-LABEL: {{^}}extract_undef_offset_sgpr:
define amdgpu_kernel void @extract_undef_offset_sgpr(i32 addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
entry:
  %ld = load volatile <4 x i32>, <4 x i32> addrspace(1)* %in
  %value = extractelement <4 x i32> %ld, i32 undef
  store i32 %value, i32 addrspace(1)* %out
  ret void
}

; GCN-LABEL: {{^}}insert_undef_offset_sgpr_vector_src:
; GCN-DAG: buffer_load_dwordx4
; MOVREL-DAG: s_mov_b32 m0,
; MOVREL: v_movreld_b32
define amdgpu_kernel void @insert_undef_offset_sgpr_vector_src(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
entry:
  %ld = load <4 x i32>, <4 x i32> addrspace(1)* %in
  %value = insertelement <4 x i32> %ld, i32 5, i32 undef
  store <4 x i32> %value, <4 x i32> addrspace(1)* %out
  ret void
}

; GCN-LABEL: {{^}}insert_w_offset:
; GCN-DAG: s_load_dword [[IN:s[0-9]+]]
; MOVREL-DAG: s_mov_b32 m0, [[IN]]
; GCN-DAG: v_mov_b32_e32 v[[ELT0:[0-9]+]], 1.0
; GCN-DAG: v_mov_b32_e32 v[[ELT1:[0-9]+]], 2.0
; GCN-DAG: v_mov_b32_e32 v[[ELT2:[0-9]+]], 0x40400000
; GCN-DAG: v_mov_b32_e32 v[[ELT3:[0-9]+]], 4.0
; GCN-DAG: v_mov_b32_e32 v[[INS:[0-9]+]], 0x40a00000

; MOVREL: v_movreld_b32_e32 v[[ELT1]], v[[INS]]
; MOVREL: buffer_store_dwordx4 v{{\[}}[[ELT0]]:[[ELT3]]{{\]}}
define amdgpu_kernel void @insert_w_offset(<4 x float> addrspace(1)* %out, i32 %in) {
entry:
  %0 = add i32 %in, 1
  %1 = insertelement <4 x float> <float 1.0, float 2.0, float 3.0, float 4.0>, float 5.0, i32 %0
  store <4 x float> %1, <4 x float> addrspace(1)* %out
  ret void
}

; GCN-LABEL: {{^}}insert_wo_offset:
; GCN: s_load_dword [[IN:s[0-9]+]]

; MOVREL: s_mov_b32 m0, [[IN]]
; MOVREL: v_movreld_b32_e32 v[[ELT0:[0-9]+]]

; IDXMODE: s_set_gpr_idx_on [[IN]], dst
; IDXMODE-NEXT: v_mov_b32_e32 v[[ELT0:[0-9]+]], v{{[0-9]+}}
; IDXMODE-NEXT: s_set_gpr_idx_off

; GCN: buffer_store_dwordx4 v{{\[}}[[ELT0]]:
define amdgpu_kernel void @insert_wo_offset(<4 x float> addrspace(1)* %out, i32 %in) {
entry:
  %0 = insertelement <4 x float> <float 1.0, float 2.0, float 3.0, float 4.0>, float 5.0, i32 %in
  store <4 x float> %0, <4 x float> addrspace(1)* %out
  ret void
}

; GCN-LABEL: {{^}}insert_neg_offset_sgpr:
; The offset depends on the register that holds the first element of the vector.
; MOVREL: s_add_i32 m0, s{{[0-9]+}}, 0xfffffe{{[0-9a-z]+}}
; MOVREL: v_movreld_b32_e32 v0, 5

; IDXMODE: s_addk_i32 [[ADD_IDX:s[0-9]+]], 0xfe00{{$}}
; IDXMODE: s_set_gpr_idx_on [[ADD_IDX]], dst
; IDXMODE-NEXT: v_mov_b32_e32 v0, 5
; IDXMODE-NEXT: s_set_gpr_idx_off
define amdgpu_kernel void @insert_neg_offset_sgpr(i32 addrspace(1)* %in, <4 x i32> addrspace(1)* %out, i32 %offset) {
entry:
  %index = add i32 %offset, -512
  %value = insertelement <4 x i32> <i32 0, i32 1, i32 2, i32 3>, i32 5, i32 %index
  store <4 x i32> %value, <4 x i32> addrspace(1)* %out
  ret void
}

; The vector indexed into is originally loaded into an SGPR rather
; than built with a reg_sequence.

; GCN-LABEL: {{^}}insert_neg_offset_sgpr_loadreg:
; The offset depends on the register that holds the first element of the vector.
; MOVREL: s_add_i32 m0, s{{[0-9]+}}, 0xfffffe{{[0-9a-z]+}}
; MOVREL: v_movreld_b32_e32 v0, 5

; IDXMODE: s_addk_i32 [[ADD_IDX:s[0-9]+]], 0xfe00{{$}}
; IDXMODE: s_set_gpr_idx_on [[ADD_IDX]], dst
; IDXMODE-NEXT: v_mov_b32_e32 v0, 5
; IDXMODE-NEXT: s_set_gpr_idx_off
define amdgpu_kernel void @insert_neg_offset_sgpr_loadreg(i32 addrspace(1)* %in, <4 x i32> addrspace(1)* %out, <4 x i32> %vec, i32 %offset) {
entry:
  %index = add i32 %offset, -512
  %value = insertelement <4 x i32> %vec, i32 5, i32 %index
  store <4 x i32> %value, <4 x i32> addrspace(1)* %out
  ret void
}

; GCN-LABEL: {{^}}insert_neg_offset_vgpr:
; The offset depends on the register that holds the first element of the vector.

; GCN-DAG: v_mov_b32_e32 [[VEC_ELT0:v[0-9]+]], 1{{$}}
; GCN-DAG: v_mov_b32_e32 [[VEC_ELT1:v[0-9]+]], 2{{$}}
; GCN-DAG: v_mov_b32_e32 [[VEC_ELT2:v[0-9]+]], 3{{$}}
; GCN-DAG: v_mov_b32_e32 [[VEC_ELT3:v[0-9]+]], 4{{$}}

; GCN: s_mov_b64 [[SAVEEXEC:s\[[0-9]+:[0-9]+\]]], exec
; GCN: [[LOOPBB:BB[0-9]+_[0-9]+]]:
; GCN: v_readfirstlane_b32 [[READLANE:s[0-9]+]]

; MOVREL: s_add_i32 m0, [[READLANE]], 0xfffffe00
; MOVREL: s_and_saveexec_b64 vcc, vcc
; MOVREL: v_movreld_b32_e32 [[VEC_ELT0]], 5

; IDXMODE: s_addk_i32 [[ADD_IDX:s[0-9]+]], 0xfe00{{$}}
; IDXMODE: s_set_gpr_idx_idx [[ADD_IDX]]
; IDXMODE: s_and_saveexec_b64 vcc, vcc
; IDXMODE: v_mov_b32_e32 v{{[0-9]+}}, 5

; GCN: s_cbranch_execnz [[LOOPBB]]
; GCN: s_mov_b64 exec, [[SAVEEXEC]]

; IDXMODE: s_set_gpr_idx_off

; GCN: buffer_store_dword
define amdgpu_kernel void @insert_neg_offset_vgpr(i32 addrspace(1)* %in, <4 x i32> addrspace(1)* %out) {
entry:
  %id = call i32 @llvm.amdgcn.workitem.id.x() #1
  %index = add i32 %id, -512
  %value = insertelement <4 x i32> <i32 1, i32 2, i32 3, i32 4>, i32 5, i32 %index
  store <4 x i32> %value, <4 x i32> addrspace(1)* %out
  ret void
}

; GCN-LABEL: {{^}}insert_neg_inline_offset_vgpr:

; GCN-DAG: v_mov_b32_e32 [[VEC_ELT0:v[0-9]+]], 1{{$}}
; GCN-DAG: v_mov_b32_e32 [[VEC_ELT1:v[0-9]+]], 2{{$}}
; GCN-DAG: v_mov_b32_e32 [[VEC_ELT2:v[0-9]+]], 3{{$}}
; GCN-DAG: v_mov_b32_e32 [[VEC_ELT3:v[0-9]+]], 4{{$}}
; GCN-DAG: v_mov_b32_e32 [[VAL:v[0-9]+]], 0x1f4{{$}}

; IDXMODE: s_set_gpr_idx_on 0, dst

; GCN: s_mov_b64 [[SAVEEXEC:s\[[0-9]+:[0-9]+\]]], exec

; The offset depends on the register that holds the first element of the vector.
; GCN: v_readfirstlane_b32 [[READLANE:s[0-9]+]]

; MOVREL: s_add_i32 m0, [[READLANE]], -16
; MOVREL: v_movreld_b32_e32 [[VEC_ELT0]], [[VAL]]

; IDXMODE: s_add_i32 [[ADD_IDX:s[0-9]+]], [[READLANE]], -16
; IDXMODE: s_set_gpr_idx_idx [[ADD_IDX]]
; IDXMODE: v_mov_b32_e32 [[VEC_ELT0]], [[VAL]]

; GCN: s_cbranch_execnz

; IDXMODE: s_set_gpr_idx_off
define amdgpu_kernel void @insert_neg_inline_offset_vgpr(i32 addrspace(1)* %in, <4 x i32> addrspace(1)* %out) {
entry:
  %id = call i32 @llvm.amdgcn.workitem.id.x() #1
  %index = add i32 %id, -16
  %value = insertelement <4 x i32> <i32 1, i32 2, i32 3, i32 4>, i32 500, i32 %index
  store <4 x i32> %value, <4 x i32> addrspace(1)* %out
  ret void
}

; When the block is split to insert the loop, make sure any other
; places that need to be expanded in the same block are also handled.

; GCN-LABEL: {{^}}extract_vgpr_offset_multiple_in_block:

; FIXME: Why is the vector copied in between?

; GCN-DAG: {{buffer|flat}}_load_dword [[IDX0:v[0-9]+]]
; GCN-DAG: s_mov_b32 [[S_ELT1:s[0-9]+]], 9
; GCN-DAG: s_mov_b32 [[S_ELT0:s[0-9]+]], 7
; GCN-DAG: v_mov_b32_e32 [[VEC_ELT0:v[0-9]+]], [[S_ELT0]]
; GCN-DAG: v_mov_b32_e32 [[VEC_ELT1:v[0-9]+]], [[S_ELT1]]

; IDXMODE: s_set_gpr_idx_on 0, src0

; GCN: s_mov_b64 [[MASK:s\[[0-9]+:[0-9]+\]]], exec

; GCN: [[LOOP0:BB[0-9]+_[0-9]+]]:
; GCN-NEXT: s_waitcnt vmcnt(0)
; GCN-NEXT: v_readfirstlane_b32 [[READLANE:s[0-9]+]], [[IDX0]]
; GCN: v_cmp_eq_u32_e32 vcc, [[READLANE]], [[IDX0]]

; MOVREL: s_mov_b32 m0, [[READLANE]]
; MOVREL: s_and_saveexec_b64 vcc, vcc
; MOVREL: v_movrels_b32_e32 [[MOVREL0:v[0-9]+]], [[VEC_ELT0]]

; IDXMODE: s_set_gpr_idx_idx [[READLANE]]
; IDXMODE: s_and_saveexec_b64 vcc, vcc
; IDXMODE: v_mov_b32_e32 [[MOVREL0:v[0-9]+]], [[VEC_ELT0]]

; GCN-NEXT: s_xor_b64 exec, exec, vcc
; GCN-NEXT: s_cbranch_execnz [[LOOP0]]

; FIXME: Redundant copy
; GCN: s_mov_b64 exec, [[MASK]]
; IDXMODE: s_set_gpr_idx_off

; GCN: v_mov_b32_e32 [[VEC_ELT1_2:v[0-9]+]], [[S_ELT1]]

; IDXMODE: s_set_gpr_idx_on 0, src0
; GCN: s_mov_b64 [[MASK2:s\[[0-9]+:[0-9]+\]]], exec

; GCN: [[LOOP1:BB[0-9]+_[0-9]+]]:
; GCN-NEXT: v_readfirstlane_b32 [[READLANE:s[0-9]+]], [[IDX0]]
; GCN: v_cmp_eq_u32_e32 vcc, [[READLANE]], [[IDX0]]

; MOVREL: s_mov_b32 m0, [[READLANE]]
; MOVREL: s_and_saveexec_b64 vcc, vcc
; MOVREL-NEXT: v_movrels_b32_e32 [[MOVREL1:v[0-9]+]], [[VEC_ELT1_2]]

; IDXMODE: s_set_gpr_idx_idx [[READLANE]]
; IDXMODE: s_and_saveexec_b64 vcc, vcc
; IDXMODE-NEXT: v_mov_b32_e32 [[MOVREL1:v[0-9]+]], [[VEC_ELT1_2]]

; GCN-NEXT: s_xor_b64 exec, exec, vcc
; GCN: s_cbranch_execnz [[LOOP1]]

; IDXMODE: s_set_gpr_idx_off

; GCN: buffer_store_dword [[MOVREL0]]
; GCN: buffer_store_dword [[MOVREL1]]
define amdgpu_kernel void @extract_vgpr_offset_multiple_in_block(i32 addrspace(1)* %out0, i32 addrspace(1)* %out1, i32 addrspace(1)* %in) #0 {
entry:
  %id = call i32 @llvm.amdgcn.workitem.id.x() #1
  %id.ext = zext i32 %id to i64
  %gep = getelementptr inbounds i32, i32 addrspace(1)* %in, i64 %id.ext
  %idx0 = load volatile i32, i32 addrspace(1)* %gep
  %idx1 = add i32 %idx0, 1
  %val0 = extractelement <4 x i32> <i32 7, i32 9, i32 11, i32 13>, i32 %idx0
  %live.out.reg = call i32 asm sideeffect "s_mov_b32 $0, 17", "={SGPR4}" ()
  %val1 = extractelement <4 x i32> <i32 7, i32 9, i32 11, i32 13>, i32 %idx1
  store volatile i32 %val0, i32 addrspace(1)* %out0
  store volatile i32 %val1, i32 addrspace(1)* %out0
  %cmp = icmp eq i32 %id, 0
  br i1 %cmp, label %bb1, label %bb2

bb1:
  store volatile i32 %live.out.reg, i32 addrspace(1)* undef
  br label %bb2

bb2:
  ret void
}
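
; Note for the test above: the source block contains two divergent
; extracts plus an inline-asm def that lives out, so when the block is
; split the backend has to emit two independent waterfall loops (LOOP0
; and LOOP1) and keep the asm result usable across both splits.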

; GCN-LABEL: {{^}}insert_vgpr_offset_multiple_in_block:
; GCN-DAG: s_load_dwordx4 s{{\[}}[[S_ELT0:[0-9]+]]:[[S_ELT3:[0-9]+]]{{\]}}
; GCN-DAG: {{buffer|flat}}_load_dword [[IDX0:v[0-9]+]]
; GCN-DAG: v_mov_b32 [[INS0:v[0-9]+]], 62

; GCN-DAG: v_mov_b32_e32 v[[VEC_ELT3:[0-9]+]], s[[S_ELT3]]
; GCN: v_mov_b32_e32 v[[VEC_ELT2:[0-9]+]], s{{[0-9]+}}
; GCN: v_mov_b32_e32 v[[VEC_ELT1:[0-9]+]], s{{[0-9]+}}
; GCN: v_mov_b32_e32 v[[VEC_ELT0:[0-9]+]], s[[S_ELT0]]

; IDXMODE: s_set_gpr_idx_on 0, dst

; GCN: [[LOOP0:BB[0-9]+_[0-9]+]]:
; GCN-NEXT: s_waitcnt vmcnt(0)
; GCN-NEXT: v_readfirstlane_b32 [[READLANE:s[0-9]+]], [[IDX0]]
; GCN: v_cmp_eq_u32_e32 vcc, [[READLANE]], [[IDX0]]

; MOVREL: s_mov_b32 m0, [[READLANE]]
; MOVREL: s_and_saveexec_b64 vcc, vcc
; MOVREL-NEXT: v_movreld_b32_e32 v[[VEC_ELT0]], [[INS0]]

; IDXMODE: s_set_gpr_idx_idx [[READLANE]]
; IDXMODE: s_and_saveexec_b64 vcc, vcc
; IDXMODE-NEXT: v_mov_b32_e32 v[[VEC_ELT0]], [[INS0]]

; GCN-NEXT: s_xor_b64 exec, exec, vcc
; GCN: s_cbranch_execnz [[LOOP0]]

; FIXME: Redundant copy
; GCN: s_mov_b64 exec, [[MASK:s\[[0-9]+:[0-9]+\]]]
; IDXMODE: s_set_gpr_idx_off

; IDXMODE: s_set_gpr_idx_on 0, dst
; GCN: s_mov_b64 [[MASK]], exec

; GCN: [[LOOP1:BB[0-9]+_[0-9]+]]:
; GCN-NEXT: v_readfirstlane_b32 [[READLANE:s[0-9]+]], [[IDX0]]
; GCN: v_cmp_eq_u32_e32 vcc, [[READLANE]], [[IDX0]]

; MOVREL: s_mov_b32 m0, [[READLANE]]
; MOVREL: s_and_saveexec_b64 vcc, vcc
; MOVREL-NEXT: v_movreld_b32_e32 v[[VEC_ELT1]], 63

; IDXMODE: s_set_gpr_idx_idx [[READLANE]]
; IDXMODE: s_and_saveexec_b64 vcc, vcc
; IDXMODE-NEXT: v_mov_b32_e32 v[[VEC_ELT1]], 63

; GCN-NEXT: s_xor_b64 exec, exec, vcc
; GCN: s_cbranch_execnz [[LOOP1]]

; GCN: buffer_store_dwordx4 v{{\[}}[[VEC_ELT0]]:

; GCN: buffer_store_dword [[INS0]]
define amdgpu_kernel void @insert_vgpr_offset_multiple_in_block(<4 x i32> addrspace(1)* %out0, <4 x i32> addrspace(1)* %out1, i32 addrspace(1)* %in, <4 x i32> %vec0) #0 {
entry:
  %id = call i32 @llvm.amdgcn.workitem.id.x() #1
  %id.ext = zext i32 %id to i64
  %gep = getelementptr inbounds i32, i32 addrspace(1)* %in, i64 %id.ext
  %idx0 = load volatile i32, i32 addrspace(1)* %gep
  %idx1 = add i32 %idx0, 1
  %live.out.val = call i32 asm sideeffect "v_mov_b32 $0, 62", "=v"()
  %vec1 = insertelement <4 x i32> %vec0, i32 %live.out.val, i32 %idx0
  %vec2 = insertelement <4 x i32> %vec1, i32 63, i32 %idx1
  store volatile <4 x i32> %vec2, <4 x i32> addrspace(1)* %out0
  %cmp = icmp eq i32 %id, 0
  br i1 %cmp, label %bb1, label %bb2

bb1:
  store volatile i32 %live.out.val, i32 addrspace(1)* undef
  br label %bb2

bb2:
  ret void
}

; GCN-LABEL: {{^}}extract_adjacent_blocks:
; GCN: s_load_dword [[ARG:s[0-9]+]]
; GCN: s_cmp_lg_u32
; GCN: s_cbranch_scc0 [[BB4:BB[0-9]+_[0-9]+]]

; GCN: buffer_load_dwordx4
; MOVREL: s_mov_b32 m0,
; MOVREL: v_movrels_b32_e32

; IDXMODE: s_set_gpr_idx_on s{{[0-9]+}}, src0
; IDXMODE: v_mov_b32_e32
; IDXMODE: s_set_gpr_idx_off

; GCN: s_branch [[ENDBB:BB[0-9]+_[0-9]+]]

; GCN: [[BB4]]:
; GCN: buffer_load_dwordx4
; MOVREL: s_mov_b32 m0,
; MOVREL: v_movrels_b32_e32

; IDXMODE: s_set_gpr_idx_on
; IDXMODE: v_mov_b32_e32
; IDXMODE: s_set_gpr_idx_off

; GCN: [[ENDBB]]:
; GCN: buffer_store_dword
; GCN: s_endpgm
define amdgpu_kernel void @extract_adjacent_blocks(i32 %arg) #0 {
bb:
  %tmp = icmp eq i32 %arg, 0
  br i1 %tmp, label %bb1, label %bb4

bb1:
  %tmp2 = load volatile <4 x float>, <4 x float> addrspace(1)* undef
  %tmp3 = extractelement <4 x float> %tmp2, i32 undef
  call void asm sideeffect "; reg use $0", "v"(<4 x float> %tmp2) #0 ; Prevent block optimize out
  br label %bb7

bb4:
  %tmp5 = load volatile <4 x float>, <4 x float> addrspace(1)* undef
  %tmp6 = extractelement <4 x float> %tmp5, i32 undef
  call void asm sideeffect "; reg use $0", "v"(<4 x float> %tmp5) #0 ; Prevent block optimize out
  br label %bb7

bb7:
  %tmp8 = phi float [ %tmp3, %bb1 ], [ %tmp6, %bb4 ]
  store volatile float %tmp8, float addrspace(1)* undef
  ret void
}

; GCN-LABEL: {{^}}insert_adjacent_blocks:
; GCN: s_load_dword [[ARG:s[0-9]+]]
; GCN: s_cmp_lg_u32
; GCN: s_cbranch_scc0 [[BB4:BB[0-9]+_[0-9]+]]

; GCN: buffer_load_dwordx4
; MOVREL: s_mov_b32 m0,
; MOVREL: v_movreld_b32_e32

; IDXMODE: s_set_gpr_idx_on s{{[0-9]+}}, dst
; IDXMODE: v_mov_b32_e32
; IDXMODE: s_set_gpr_idx_off

; GCN: s_branch [[ENDBB:BB[0-9]+_[0-9]+]]

; GCN: [[BB4]]:
; GCN: buffer_load_dwordx4
; MOVREL: s_mov_b32 m0,
; MOVREL: v_movreld_b32_e32

; IDXMODE: s_set_gpr_idx_on s{{[0-9]+}}, dst
; IDXMODE: v_mov_b32_e32
; IDXMODE: s_set_gpr_idx_off

; GCN: [[ENDBB]]:
; GCN: buffer_store_dword
; GCN: s_endpgm
define amdgpu_kernel void @insert_adjacent_blocks(i32 %arg, float %val0) #0 {
bb:
  %tmp = icmp eq i32 %arg, 0
  br i1 %tmp, label %bb1, label %bb4

bb1:                                              ; preds = %bb
  %tmp2 = load volatile <4 x float>, <4 x float> addrspace(1)* undef
  %tmp3 = insertelement <4 x float> %tmp2, float %val0, i32 undef
  call void asm sideeffect "; reg use $0", "v"(<4 x float> %tmp3) #0 ; Prevent block optimize out
  br label %bb7

bb4:                                              ; preds = %bb
  %tmp5 = load volatile <4 x float>, <4 x float> addrspace(1)* undef
  %tmp6 = insertelement <4 x float> %tmp5, float %val0, i32 undef
  call void asm sideeffect "; reg use $0", "v"(<4 x float> %tmp6) #0 ; Prevent block optimize out
  br label %bb7

bb7:                                              ; preds = %bb4, %bb1
  %tmp8 = phi <4 x float> [ %tmp3, %bb1 ], [ %tmp6, %bb4 ]
  store volatile <4 x float> %tmp8, <4 x float> addrspace(1)* undef
  ret void
}

; FIXME: Should be able to fold zero input to movreld to inline imm?

; GCN-LABEL: {{^}}multi_same_block:

; GCN-DAG: v_mov_b32_e32 v[[VEC0_ELT0:[0-9]+]], 0x41880000
; GCN-DAG: v_mov_b32_e32 v{{[0-9]+}}, 0x41900000
; GCN-DAG: v_mov_b32_e32 v[[VEC0_ELT2:[0-9]+]], 0x41980000
; GCN-DAG: v_mov_b32_e32 v{{[0-9]+}}, 0x41a00000
; GCN-DAG: v_mov_b32_e32 v{{[0-9]+}}, 0x41a80000
; GCN-DAG: v_mov_b32_e32 v{{[0-9]+}}, 0x41b00000
; GCN-DAG: s_load_dword [[ARG:s[0-9]+]]
; IDXMODE-DAG: s_add_i32 [[ARG_ADD:s[0-9]+]], [[ARG]], -16

; MOVREL-DAG: s_add_i32 m0, [[ARG]], -16
; MOVREL: v_movreld_b32_e32 v[[VEC0_ELT0]], 4.0
; GCN-NOT: m0

; IDXMODE: s_set_gpr_idx_on [[ARG_ADD]], dst
; IDXMODE: v_mov_b32_e32 v[[VEC0_ELT0]], 4.0
; IDXMODE: s_set_gpr_idx_off

; GCN: v_mov_b32_e32 v[[VEC0_ELT2]], 0x4188cccd
; GCN-DAG: v_mov_b32_e32 v{{[0-9]+}}, 0x4190cccd
; GCN-DAG: v_mov_b32_e32 v{{[0-9]+}}, 0x4198cccd
; GCN-DAG: v_mov_b32_e32 v{{[0-9]+}}, 0x41a0cccd
; GCN-DAG: v_mov_b32_e32 v{{[0-9]+}}, 0x41a8cccd
; GCN-DAG: v_mov_b32_e32 v{{[0-9]+}}, 0x41b0cccd

; MOVREL: v_movreld_b32_e32 v[[VEC0_ELT2]], -4.0

; IDXMODE: s_set_gpr_idx_on [[ARG_ADD]], dst
; IDXMODE: v_mov_b32_e32 v[[VEC0_ELT2]], -4.0
; IDXMODE: s_set_gpr_idx_off

; GCN: s_mov_b32 m0, -1
; GCN: ds_write_b32
; GCN: ds_write_b32
; GCN: s_endpgm
define amdgpu_kernel void @multi_same_block(i32 %arg) #0 {
bb:
  %tmp1 = add i32 %arg, -16
  %tmp2 = insertelement <6 x float> <float 1.700000e+01, float 1.800000e+01, float 1.900000e+01, float 2.000000e+01, float 2.100000e+01, float 2.200000e+01>, float 4.000000e+00, i32 %tmp1
  %tmp3 = add i32 %arg, -16
  %tmp4 = insertelement <6 x float> <float 0x40311999A0000000, float 0x40321999A0000000, float 0x40331999A0000000, float 0x40341999A0000000, float 0x40351999A0000000, float 0x40361999A0000000>, float -4.0, i32 %tmp3
  %tmp5 = bitcast <6 x float> %tmp2 to <6 x i32>
  %tmp6 = extractelement <6 x i32> %tmp5, i32 1
  %tmp7 = bitcast <6 x float> %tmp4 to <6 x i32>
  %tmp8 = extractelement <6 x i32> %tmp7, i32 5
  store volatile i32 %tmp6, i32 addrspace(3)* undef, align 4
  store volatile i32 %tmp8, i32 addrspace(3)* undef, align 4
  ret void
}

; The offset puts the index outside of the superregister boundaries, so clamp to the 1st element.
; GCN-LABEL: {{^}}extract_largest_inbounds_offset:
; GCN-DAG: buffer_load_dwordx4 v{{\[}}[[LO_ELT:[0-9]+]]:[[HI_ELT:[0-9]+]]{{\]}}
; GCN-DAG: s_load_dword [[IDX:s[0-9]+]]
; MOVREL: s_mov_b32 m0, [[IDX]]
; MOVREL: v_movrels_b32_e32 [[EXTRACT:v[0-9]+]], v[[HI_ELT]]

; IDXMODE: s_set_gpr_idx_on [[IDX]], src0
; IDXMODE: v_mov_b32_e32 [[EXTRACT:v[0-9]+]], v[[HI_ELT]]
; IDXMODE: s_set_gpr_idx_off

; GCN: buffer_store_dword [[EXTRACT]]
define amdgpu_kernel void @extract_largest_inbounds_offset(i32 addrspace(1)* %out, <4 x i32> addrspace(1)* %in, i32 %idx) {
entry:
  %ld = load volatile <4 x i32>, <4 x i32> addrspace(1)* %in
  %offset = add i32 %idx, 3
  %value = extractelement <4 x i32> %ld, i32 %offset
  store i32 %value, i32 addrspace(1)* %out
  ret void
}

; GCN-LABEL: {{^}}extract_out_of_bounds_offset:
; GCN-DAG: buffer_load_dwordx4 v{{\[}}[[LO_ELT:[0-9]+]]:[[HI_ELT:[0-9]+]]{{\]}}
; GCN-DAG: s_load_dword [[IDX:s[0-9]+]]
; MOVREL: s_add_i32 m0, [[IDX]], 4
; MOVREL: v_movrels_b32_e32 [[EXTRACT:v[0-9]+]], v[[LO_ELT]]

; IDXMODE: s_add_i32 [[ADD_IDX:s[0-9]+]], [[IDX]], 4
; IDXMODE: s_set_gpr_idx_on [[ADD_IDX]], src0
; IDXMODE: v_mov_b32_e32 [[EXTRACT:v[0-9]+]], v[[LO_ELT]]
; IDXMODE: s_set_gpr_idx_off

; GCN: buffer_store_dword [[EXTRACT]]
define amdgpu_kernel void @extract_out_of_bounds_offset(i32 addrspace(1)* %out, <4 x i32> addrspace(1)* %in, i32 %idx) {
entry:
  %ld = load volatile <4 x i32>, <4 x i32> addrspace(1)* %in
  %offset = add i32 %idx, 4
  %value = extractelement <4 x i32> %ld, i32 %offset
  store i32 %value, i32 addrspace(1)* %out
  ret void
}

; Test that the or is folded into the base address register instead of
; being added to m0.

; GCN-LABEL: {{^}}extractelement_v4i32_or_index:
; GCN: s_load_dword [[IDX_IN:s[0-9]+]]
; GCN: s_lshl_b32 [[IDX_SHL:s[0-9]+]], [[IDX_IN]]
; GCN-NOT: [[IDX_SHL]]

; MOVREL: s_mov_b32 m0, [[IDX_SHL]]
; MOVREL: v_movrels_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}

; IDXMODE: s_set_gpr_idx_on [[IDX_SHL]], src0
; IDXMODE: v_mov_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}
; IDXMODE: s_set_gpr_idx_off
define amdgpu_kernel void @extractelement_v4i32_or_index(i32 addrspace(1)* %out, <4 x i32> addrspace(1)* %in, i32 %idx.in) {
entry:
  %ld = load volatile <4 x i32>, <4 x i32> addrspace(1)* %in
  %idx.shl = shl i32 %idx.in, 2
  %idx = or i32 %idx.shl, 1
  %value = extractelement <4 x i32> %ld, i32 %idx
  store i32 %value, i32 addrspace(1)* %out
  ret void
}
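
; Worked example of the folding above (reasoning inferred from the CHECK
; lines): %idx = (%idx.in << 2) | 1, and because the shifted value always
; has its low bits clear, the "or 1" is equivalent to "add 1". The +1 is
; therefore absorbed by indexing relative to the register holding element
; 1 rather than element 0, and m0 (or the idx-mode index) only needs the
; shifted value; e.g. %idx.in = 0 reads element 1 directly with m0 = 0.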

; GCN-LABEL: {{^}}insertelement_v4f32_or_index:
; GCN: s_load_dword [[IDX_IN:s[0-9]+]]
; GCN: s_lshl_b32 [[IDX_SHL:s[0-9]+]], [[IDX_IN]]
; GCN-NOT: [[IDX_SHL]]

; MOVREL: s_mov_b32 m0, [[IDX_SHL]]
; MOVREL: v_movreld_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}

; IDXMODE: s_set_gpr_idx_on [[IDX_SHL]], dst
; IDXMODE: v_mov_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}
; IDXMODE: s_set_gpr_idx_off
define amdgpu_kernel void @insertelement_v4f32_or_index(<4 x float> addrspace(1)* %out, <4 x float> %a, i32 %idx.in) nounwind {
  %idx.shl = shl i32 %idx.in, 2
  %idx = or i32 %idx.shl, 1
  %vecins = insertelement <4 x float> %a, float 5.000000e+00, i32 %idx
  store <4 x float> %vecins, <4 x float> addrspace(1)* %out, align 16
  ret void
}

; GCN-LABEL: {{^}}broken_phi_bb:
; GCN: v_mov_b32_e32 [[PHIREG:v[0-9]+]], 8

; GCN: s_branch [[BB2:BB[0-9]+_[0-9]+]]

; GCN: {{^BB[0-9]+_[0-9]+}}:
; GCN: s_mov_b64 exec,
; IDXMODE: s_set_gpr_idx_off

; GCN: [[BB2]]:
; GCN: v_cmp_le_i32_e32 vcc, s{{[0-9]+}}, [[PHIREG]]
; GCN: buffer_load_dword

; GCN: [[REGLOOP:BB[0-9]+_[0-9]+]]:
; MOVREL: v_movreld_b32_e32

; IDXMODE: s_set_gpr_idx_idx
; IDXMODE: v_mov_b32_e32
; GCN: s_cbranch_execnz [[REGLOOP]]
define amdgpu_kernel void @broken_phi_bb(i32 %arg, i32 %arg1) #0 {
bb:
  br label %bb2

bb2:                                              ; preds = %bb4, %bb
  %tmp = phi i32 [ 8, %bb ], [ %tmp7, %bb4 ]
  %tmp3 = icmp slt i32 %tmp, %arg
  br i1 %tmp3, label %bb4, label %bb8

bb4:                                              ; preds = %bb2
  %vgpr = load volatile i32, i32 addrspace(1)* undef
  %tmp5 = insertelement <8 x i32> undef, i32 undef, i32 %vgpr
  %tmp6 = insertelement <8 x i32> %tmp5, i32 %arg1, i32 %vgpr
  %tmp7 = extractelement <8 x i32> %tmp6, i32 0
  br label %bb2

bb8:                                              ; preds = %bb2
  ret void
}

declare i32 @llvm.amdgcn.workitem.id.x() #1
declare void @llvm.amdgcn.s.barrier() #2

attributes #0 = { nounwind }
attributes #1 = { nounwind readnone }
attributes #2 = { nounwind convergent }