; RUN: llc -march=amdgcn -mcpu=gfx901 -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=GFX9 %s
; RUN: llc -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=VI -check-prefix=CIVI %s
; RUN: llc -march=amdgcn -mcpu=bonaire -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=CI -check-prefix=CIVI %s

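; Test codegen of left shifts of <2 x i16> and <4 x i16> vectors. GFX9 is
; expected to select the packed v_pk_lshlrev_b16 instruction, while VI splits
; the vector into 16-bit shifts and CI expands to 32-bit shifts with masking.
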
; GCN-LABEL: {{^}}s_shl_v2i16:
; GFX9: s_load_dword [[LHS:s[0-9]+]]
; GFX9: s_load_dword [[RHS:s[0-9]+]]
; GFX9: v_mov_b32_e32 [[VLHS:v[0-9]+]], [[LHS]]
; GFX9: v_pk_lshlrev_b16 [[RESULT:v[0-9]+]], [[RHS]], [[VLHS]]

; CIVI: v_lshlrev_b32_e32
; CIVI: v_and_b32_e32 v{{[0-9]+}}, 0xffff, v{{[0-9]+}}
; CIVI: v_lshlrev_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
; CIVI: v_lshlrev_b32_e32 v{{[0-9]+}}, 16, v{{[0-9]+}}
; CIVI: v_or_b32_e32
define amdgpu_kernel void @s_shl_v2i16(<2 x i16> addrspace(1)* %out, <2 x i16> %lhs, <2 x i16> %rhs) #0 {
  %result = shl <2 x i16> %lhs, %rhs
  store <2 x i16> %result, <2 x i16> addrspace(1)* %out
  ret void
}

; GCN-LABEL: {{^}}v_shl_v2i16:
; GCN: {{buffer|flat}}_load_dword [[LHS:v[0-9]+]]
; GCN: {{buffer|flat}}_load_dword [[RHS:v[0-9]+]]
; GFX9: v_pk_lshlrev_b16 [[RESULT:v[0-9]+]], [[RHS]], [[LHS]]

; VI: v_lshrrev_b32_e32 v{{[0-9]+}}, 16, v{{[0-9]+}}
; VI: v_lshlrev_b16_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
; VI: v_lshlrev_b16_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
; VI: v_lshlrev_b32_e32 v{{[0-9]+}}, 16, v{{[0-9]+}}
; VI: v_or_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}

; CI: s_mov_b32 [[MASK:s[0-9]+]], 0xffff{{$}}
; CI: v_lshrrev_b32_e32 v{{[0-9]+}}, 16, [[LHS]]
; CI: v_lshrrev_b32_e32 v{{[0-9]+}}, 16, v{{[0-9]+}}
; CI: v_lshlrev_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
; CI: v_lshl_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
; CI: v_lshlrev_b32_e32 v{{[0-9]+}}, 16, v{{[0-9]+}}
; CI: v_and_b32_e32 v{{[0-9]+}}, [[MASK]], v{{[0-9]+}}
; CI: v_or_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
define amdgpu_kernel void @v_shl_v2i16(<2 x i16> addrspace(1)* %out, <2 x i16> addrspace(1)* %in) #0 {
  %tid = call i32 @llvm.amdgcn.workitem.id.x()
  %tid.ext = sext i32 %tid to i64
  %in.gep = getelementptr inbounds <2 x i16>, <2 x i16> addrspace(1)* %in, i64 %tid.ext
  %out.gep = getelementptr inbounds <2 x i16>, <2 x i16> addrspace(1)* %out, i64 %tid.ext
  %b_ptr = getelementptr <2 x i16>, <2 x i16> addrspace(1)* %in.gep, i32 1
  %a = load <2 x i16>, <2 x i16> addrspace(1)* %in.gep
  %b = load <2 x i16>, <2 x i16> addrspace(1)* %b_ptr
  %result = shl <2 x i16> %a, %b
  store <2 x i16> %result, <2 x i16> addrspace(1)* %out.gep
  ret void
}

; GCN-LABEL: {{^}}shl_v_s_v2i16:
; GFX9: s_load_dword [[RHS:s[0-9]+]]
; GFX9: {{buffer|flat}}_load_dword [[LHS:v[0-9]+]]
; GFX9: v_pk_lshlrev_b16 [[RESULT:v[0-9]+]], [[RHS]], [[LHS]]
define amdgpu_kernel void @shl_v_s_v2i16(<2 x i16> addrspace(1)* %out, <2 x i16> addrspace(1)* %in, <2 x i16> %sgpr) #0 {
  %tid = call i32 @llvm.amdgcn.workitem.id.x()
  %tid.ext = sext i32 %tid to i64
  %in.gep = getelementptr inbounds <2 x i16>, <2 x i16> addrspace(1)* %in, i64 %tid.ext
  %out.gep = getelementptr inbounds <2 x i16>, <2 x i16> addrspace(1)* %out, i64 %tid.ext
  %vgpr = load <2 x i16>, <2 x i16> addrspace(1)* %in.gep
  %result = shl <2 x i16> %vgpr, %sgpr
  store <2 x i16> %result, <2 x i16> addrspace(1)* %out.gep
  ret void
}

; GCN-LABEL: {{^}}shl_s_v_v2i16:
; GFX9: s_load_dword [[LHS:s[0-9]+]]
; GFX9: {{buffer|flat}}_load_dword [[RHS:v[0-9]+]]
; GFX9: v_pk_lshlrev_b16 [[RESULT:v[0-9]+]], [[RHS]], [[LHS]]
define amdgpu_kernel void @shl_s_v_v2i16(<2 x i16> addrspace(1)* %out, <2 x i16> addrspace(1)* %in, <2 x i16> %sgpr) #0 {
  %tid = call i32 @llvm.amdgcn.workitem.id.x()
  %tid.ext = sext i32 %tid to i64
  %in.gep = getelementptr inbounds <2 x i16>, <2 x i16> addrspace(1)* %in, i64 %tid.ext
  %out.gep = getelementptr inbounds <2 x i16>, <2 x i16> addrspace(1)* %out, i64 %tid.ext
  %vgpr = load <2 x i16>, <2 x i16> addrspace(1)* %in.gep
  %result = shl <2 x i16> %sgpr, %vgpr
  store <2 x i16> %result, <2 x i16> addrspace(1)* %out.gep
  ret void
}

; GCN-LABEL: {{^}}shl_imm_v_v2i16:
; GCN: {{buffer|flat}}_load_dword [[RHS:v[0-9]+]]
; GFX9: v_pk_lshlrev_b16 [[RESULT:v[0-9]+]], [[RHS]], 8
define amdgpu_kernel void @shl_imm_v_v2i16(<2 x i16> addrspace(1)* %out, <2 x i16> addrspace(1)* %in) #0 {
  %tid = call i32 @llvm.amdgcn.workitem.id.x()
  %tid.ext = sext i32 %tid to i64
  %in.gep = getelementptr inbounds <2 x i16>, <2 x i16> addrspace(1)* %in, i64 %tid.ext
  %out.gep = getelementptr inbounds <2 x i16>, <2 x i16> addrspace(1)* %out, i64 %tid.ext
  %vgpr = load <2 x i16>, <2 x i16> addrspace(1)* %in.gep
  %result = shl <2 x i16> <i16 8, i16 8>, %vgpr
  store <2 x i16> %result, <2 x i16> addrspace(1)* %out.gep
  ret void
}

; GCN-LABEL: {{^}}shl_v_imm_v2i16:
; GCN: {{buffer|flat}}_load_dword [[LHS:v[0-9]+]]
; GFX9: v_pk_lshlrev_b16 [[RESULT:v[0-9]+]], 8, [[LHS]]
define amdgpu_kernel void @shl_v_imm_v2i16(<2 x i16> addrspace(1)* %out, <2 x i16> addrspace(1)* %in) #0 {
  %tid = call i32 @llvm.amdgcn.workitem.id.x()
  %tid.ext = sext i32 %tid to i64
  %in.gep = getelementptr inbounds <2 x i16>, <2 x i16> addrspace(1)* %in, i64 %tid.ext
  %out.gep = getelementptr inbounds <2 x i16>, <2 x i16> addrspace(1)* %out, i64 %tid.ext
  %vgpr = load <2 x i16>, <2 x i16> addrspace(1)* %in.gep
  %result = shl <2 x i16> %vgpr, <i16 8, i16 8>
  store <2 x i16> %result, <2 x i16> addrspace(1)* %out.gep
  ret void
}

; GCN-LABEL: {{^}}v_shl_v4i16:
; GCN: {{buffer|flat}}_load_dwordx2
; GCN: {{buffer|flat}}_load_dwordx2
; GFX9: v_pk_lshlrev_b16 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
; GFX9: v_pk_lshlrev_b16 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
; GCN: {{buffer|flat}}_store_dwordx2
define amdgpu_kernel void @v_shl_v4i16(<4 x i16> addrspace(1)* %out, <4 x i16> addrspace(1)* %in) #0 {
  %tid = call i32 @llvm.amdgcn.workitem.id.x()
  %tid.ext = sext i32 %tid to i64
  %in.gep = getelementptr inbounds <4 x i16>, <4 x i16> addrspace(1)* %in, i64 %tid.ext
  %out.gep = getelementptr inbounds <4 x i16>, <4 x i16> addrspace(1)* %out, i64 %tid.ext
  %b_ptr = getelementptr <4 x i16>, <4 x i16> addrspace(1)* %in.gep, i32 1
  %a = load <4 x i16>, <4 x i16> addrspace(1)* %in.gep
  %b = load <4 x i16>, <4 x i16> addrspace(1)* %b_ptr
  %result = shl <4 x i16> %a, %b
  store <4 x i16> %result, <4 x i16> addrspace(1)* %out.gep
  ret void
}

; GCN-LABEL: {{^}}shl_v_imm_v4i16:
; GCN: {{buffer|flat}}_load_dwordx2
; GFX9: v_pk_lshlrev_b16 v{{[0-9]+}}, 8, v{{[0-9]+}}
; GFX9: v_pk_lshlrev_b16 v{{[0-9]+}}, 8, v{{[0-9]+}}
; GCN: {{buffer|flat}}_store_dwordx2
define amdgpu_kernel void @shl_v_imm_v4i16(<4 x i16> addrspace(1)* %out, <4 x i16> addrspace(1)* %in) #0 {
  %tid = call i32 @llvm.amdgcn.workitem.id.x()
  %tid.ext = sext i32 %tid to i64
  %in.gep = getelementptr inbounds <4 x i16>, <4 x i16> addrspace(1)* %in, i64 %tid.ext
  %out.gep = getelementptr inbounds <4 x i16>, <4 x i16> addrspace(1)* %out, i64 %tid.ext
  %vgpr = load <4 x i16>, <4 x i16> addrspace(1)* %in.gep
  %result = shl <4 x i16> %vgpr, <i16 8, i16 8, i16 8, i16 8>
  store <4 x i16> %result, <4 x i16> addrspace(1)* %out.gep
  ret void
}

declare i32 @llvm.amdgcn.workitem.id.x() #1

attributes #0 = { nounwind }
attributes #1 = { nounwind readnone }