; Tests lowering of unsigned byte -> float conversions to the AMDGPU
; v_cvt_f32_ubyte{0..3} instructions, for SI and VI subtargets.
; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=SI %s
; RUN: llc -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=VI %s

declare i32 @llvm.amdgcn.workitem.id.x() nounwind readnone
declare i32 @llvm.amdgcn.workitem.id.y() nounwind readnone
; Single i8 load converted to f32: expect a ubyte load feeding
; v_cvt_f32_ubyte0 directly, with no bfe/lshr byte extraction.
; GCN-LABEL: {{^}}load_i8_to_f32:
; GCN: buffer_load_ubyte [[LOADREG:v[0-9]+]],
; GCN-NOT: bfe
; GCN-NOT: lshr
; GCN: v_cvt_f32_ubyte0_e32 [[CONV:v[0-9]+]], [[LOADREG]]
; GCN: buffer_store_dword [[CONV]],
define amdgpu_kernel void @load_i8_to_f32(float addrspace(1)* noalias %out, i8 addrspace(1)* noalias %in) nounwind {
  %load = load i8, i8 addrspace(1)* %in, align 1
  %cvt = uitofp i8 %load to float
  store float %cvt, float addrspace(1)* %out, align 4
  ret void
}
19
; <2 x i8> case: one ushort load, with byte 0 and byte 1 converted by
; v_cvt_f32_ubyte0/v_cvt_f32_ubyte1 from the same register.
; GCN-LABEL: {{^}}load_v2i8_to_v2f32:
; GCN: buffer_load_ushort [[LD:v[0-9]+]]
; GCN-DAG: v_cvt_f32_ubyte1_e32 v[[HIRESULT:[0-9]+]], [[LD]]
; GCN-DAG: v_cvt_f32_ubyte0_e32 v[[LORESULT:[0-9]+]], [[LD]]
; GCN: buffer_store_dwordx2 v{{\[}}[[LORESULT]]:[[HIRESULT]]{{\]}},
define amdgpu_kernel void @load_v2i8_to_v2f32(<2 x float> addrspace(1)* noalias %out, <2 x i8> addrspace(1)* noalias %in) nounwind {
  %load = load <2 x i8>, <2 x i8> addrspace(1)* %in, align 2
  %cvt = uitofp <2 x i8> %load to <2 x float>
  store <2 x float> %cvt, <2 x float> addrspace(1)* %out, align 16
  ret void
}
31
; <3 x i8> with align 4: loaded as a full dword; bytes 0-2 are converted
; and byte 3 must not be (no v_cvt_f32_ubyte3).
; GCN-LABEL: {{^}}load_v3i8_to_v3f32:
; GCN: buffer_load_dword [[VAL:v[0-9]+]]
; GCN-NOT: v_cvt_f32_ubyte3_e32
; GCN-DAG: v_cvt_f32_ubyte2_e32 v{{[0-9]+}}, [[VAL]]
; GCN-DAG: v_cvt_f32_ubyte1_e32 v[[HIRESULT:[0-9]+]], [[VAL]]
; GCN-DAG: v_cvt_f32_ubyte0_e32 v[[LORESULT:[0-9]+]], [[VAL]]
; GCN: buffer_store_dwordx2 v{{\[}}[[LORESULT]]:[[HIRESULT]]{{\]}},
define amdgpu_kernel void @load_v3i8_to_v3f32(<3 x float> addrspace(1)* noalias %out, <3 x i8> addrspace(1)* noalias %in) nounwind {
  %load = load <3 x i8>, <3 x i8> addrspace(1)* %in, align 4
  %cvt = uitofp <3 x i8> %load to <3 x float>
  store <3 x float> %cvt, <3 x float> addrspace(1)* %out, align 16
  ret void
}
45
; <4 x i8> with align 4: a single dword load feeds all four
; v_cvt_f32_ubyte{0..3} conversions with no bfe/lshr extraction.
; GCN-LABEL: {{^}}load_v4i8_to_v4f32:
; GCN: buffer_load_dword [[LOADREG:v[0-9]+]]
; GCN-NOT: bfe
; GCN-NOT: lshr
; GCN-DAG: v_cvt_f32_ubyte3_e32 v[[HIRESULT:[0-9]+]], [[LOADREG]]
; GCN-DAG: v_cvt_f32_ubyte2_e32 v{{[0-9]+}}, [[LOADREG]]
; GCN-DAG: v_cvt_f32_ubyte1_e32 v{{[0-9]+}}, [[LOADREG]]
; GCN-DAG: v_cvt_f32_ubyte0_e32 v[[LORESULT:[0-9]+]], [[LOADREG]]
; GCN: buffer_store_dwordx4 v{{\[}}[[LORESULT]]:[[HIRESULT]]{{\]}},
define amdgpu_kernel void @load_v4i8_to_v4f32(<4 x float> addrspace(1)* noalias %out, <4 x i8> addrspace(1)* noalias %in) nounwind {
  %load = load <4 x i8>, <4 x i8> addrspace(1)* %in, align 4
  %cvt = uitofp <4 x i8> %load to <4 x float>
  store <4 x float> %cvt, <4 x float> addrspace(1)* %out, align 16
  ret void
}
61
; This should not be adding instructions to shift into the correct
; position in the word for the component.

; FIXME: Packing bytes
; GCN-LABEL: {{^}}load_v4i8_to_v4f32_unaligned:
; GCN: buffer_load_ubyte [[LOADREG3:v[0-9]+]]
; GCN: buffer_load_ubyte [[LOADREG2:v[0-9]+]]
; GCN: buffer_load_ubyte [[LOADREG1:v[0-9]+]]
; GCN: buffer_load_ubyte [[LOADREG0:v[0-9]+]]
; GCN-DAG: v_lshlrev_b32
; GCN-DAG: v_or_b32
; GCN-DAG: v_cvt_f32_ubyte0_e32 v[[LORESULT:[0-9]+]],
; GCN-DAG: v_cvt_f32_ubyte0_e32 v{{[0-9]+}},
; GCN-DAG: v_cvt_f32_ubyte0_e32 v{{[0-9]+}},
; GCN-DAG: v_cvt_f32_ubyte0_e32 v[[HIRESULT:[0-9]+]]

; GCN: buffer_store_dwordx4
define amdgpu_kernel void @load_v4i8_to_v4f32_unaligned(<4 x float> addrspace(1)* noalias %out, <4 x i8> addrspace(1)* noalias %in) nounwind {
  %load = load <4 x i8>, <4 x i8> addrspace(1)* %in, align 1
  %cvt = uitofp <4 x i8> %load to <4 x float>
  store <4 x float> %cvt, <4 x float> addrspace(1)* %out, align 16
  ret void
}
85
; FIXME: Need to handle non-uniform case for function below (load without gep).
; Instructions still emitted to repack bytes for add use.

; GCN-LABEL: {{^}}load_v4i8_to_v4f32_2_uses:
; GCN: {{buffer|flat}}_load_dword
; GCN-DAG: v_cvt_f32_ubyte0_e32
; GCN-DAG: v_cvt_f32_ubyte1_e32
; GCN-DAG: v_cvt_f32_ubyte2_e32
; GCN-DAG: v_cvt_f32_ubyte3_e32

; GCN-DAG: v_lshrrev_b32_e32 v{{[0-9]+}}, 24

; SI-DAG: v_lshlrev_b32_e32 v{{[0-9]+}}, 16
; SI-DAG: v_lshlrev_b32_e32 v{{[0-9]+}}, 8
; SI-DAG: v_and_b32_e32 v{{[0-9]+}}, 0xffff,
; SI-DAG: v_and_b32_e32 v{{[0-9]+}}, 0xff00,
; SI-DAG: v_add_i32

; VI-DAG: v_and_b32_e32 v{{[0-9]+}}, 0xffffff00,
; VI-DAG: v_add_u16_e32
; VI-DAG: v_add_u16_e32

; GCN: {{buffer|flat}}_store_dwordx4
; GCN: {{buffer|flat}}_store_dword

; GCN: s_endpgm
define amdgpu_kernel void @load_v4i8_to_v4f32_2_uses(<4 x float> addrspace(1)* noalias %out, <4 x i8> addrspace(1)* noalias %out2, <4 x i8> addrspace(1)* noalias %in) nounwind {
  %tid.x = call i32 @llvm.amdgcn.workitem.id.x()
  %in.ptr = getelementptr <4 x i8>, <4 x i8> addrspace(1)* %in, i32 %tid.x
  %load = load <4 x i8>, <4 x i8> addrspace(1)* %in.ptr, align 4
  %cvt = uitofp <4 x i8> %load to <4 x float>
  store <4 x float> %cvt, <4 x float> addrspace(1)* %out, align 16
  %add = add <4 x i8> %load, <i8 9, i8 9, i8 9, i8 9> ; Second use of %load
  store <4 x i8> %add, <4 x i8> addrspace(1)* %out2, align 4
  ret void
}
122
; Make sure this doesn't crash.
; GCN-LABEL: {{^}}load_v7i8_to_v7f32:
; GCN: s_endpgm
define amdgpu_kernel void @load_v7i8_to_v7f32(<7 x float> addrspace(1)* noalias %out, <7 x i8> addrspace(1)* noalias %in) nounwind {
  %load = load <7 x i8>, <7 x i8> addrspace(1)* %in, align 1
  %cvt = uitofp <7 x i8> %load to <7 x float>
  store <7 x float> %cvt, <7 x float> addrspace(1)* %out, align 16
  ret void
}
132
; <8 x i8> with align 8: one dwordx2 load; each half feeds four
; v_cvt_f32_ubyte{0..3} conversions, with no bfe/lshr extraction.
; GCN-LABEL: {{^}}load_v8i8_to_v8f32:
; GCN: buffer_load_dwordx2 v{{\[}}[[LOLOAD:[0-9]+]]:[[HILOAD:[0-9]+]]{{\]}},
; GCN-NOT: bfe
; GCN-NOT: lshr
; GCN-DAG: v_cvt_f32_ubyte3_e32 v{{[0-9]+}}, v[[LOLOAD]]
; GCN-DAG: v_cvt_f32_ubyte2_e32 v{{[0-9]+}}, v[[LOLOAD]]
; GCN-DAG: v_cvt_f32_ubyte1_e32 v{{[0-9]+}}, v[[LOLOAD]]
; GCN-DAG: v_cvt_f32_ubyte0_e32 v{{[0-9]+}}, v[[LOLOAD]]
; GCN-DAG: v_cvt_f32_ubyte3_e32 v{{[0-9]+}}, v[[HILOAD]]
; GCN-DAG: v_cvt_f32_ubyte2_e32 v{{[0-9]+}}, v[[HILOAD]]
; GCN-DAG: v_cvt_f32_ubyte1_e32 v{{[0-9]+}}, v[[HILOAD]]
; GCN-DAG: v_cvt_f32_ubyte0_e32 v{{[0-9]+}}, v[[HILOAD]]
; GCN-NOT: bfe
; GCN-NOT: lshr
; GCN: buffer_store_dwordx4
; GCN: buffer_store_dwordx4
define amdgpu_kernel void @load_v8i8_to_v8f32(<8 x float> addrspace(1)* noalias %out, <8 x i8> addrspace(1)* noalias %in) nounwind {
  %load = load <8 x i8>, <8 x i8> addrspace(1)* %in, align 8
  %cvt = uitofp <8 x i8> %load to <8 x float>
  store <8 x float> %cvt, <8 x float> addrspace(1)* %out, align 16
  ret void
}
155
; (x + 2) & 255 converted to f32: the mask folds into
; v_cvt_f32_ubyte0 applied straight to the add result.
; GCN-LABEL: {{^}}i8_zext_inreg_i32_to_f32:
; GCN: buffer_load_dword [[LOADREG:v[0-9]+]],
; GCN: v_add_i32_e32 [[ADD:v[0-9]+]], vcc, 2, [[LOADREG]]
; GCN-NEXT: v_cvt_f32_ubyte0_e32 [[CONV:v[0-9]+]], [[ADD]]
; GCN: buffer_store_dword [[CONV]],
define amdgpu_kernel void @i8_zext_inreg_i32_to_f32(float addrspace(1)* noalias %out, i32 addrspace(1)* noalias %in) nounwind {
  %load = load i32, i32 addrspace(1)* %in, align 4
  %add = add i32 %load, 2
  %inreg = and i32 %add, 255
  %cvt = uitofp i32 %inreg to float
  store float %cvt, float addrspace(1)* %out, align 4
  ret void
}
169
; (x & 0xff00) >> 8 converted to f32 -- byte 1 extraction via mask+shift.
; GCN-LABEL: {{^}}i8_zext_inreg_hi1_to_f32:
define amdgpu_kernel void @i8_zext_inreg_hi1_to_f32(float addrspace(1)* noalias %out, i32 addrspace(1)* noalias %in) nounwind {
  %load = load i32, i32 addrspace(1)* %in, align 4
  %inreg = and i32 %load, 65280
  %shr = lshr i32 %inreg, 8
  %cvt = uitofp i32 %shr to float
  store float %cvt, float addrspace(1)* %out, align 4
  ret void
}
179
; We don't get these ones because of the zext, but instcombine removes
; them so it shouldn't really matter.
; GCN-LABEL: {{^}}i8_zext_i32_to_f32:
define amdgpu_kernel void @i8_zext_i32_to_f32(float addrspace(1)* noalias %out, i8 addrspace(1)* noalias %in) nounwind {
  %load = load i8, i8 addrspace(1)* %in, align 1
  %ext = zext i8 %load to i32
  %cvt = uitofp i32 %ext to float
  store float %cvt, float addrspace(1)* %out, align 4
  ret void
}
190
; Vector variant of the explicit-zext case above.
; GCN-LABEL: {{^}}v4i8_zext_v4i32_to_v4f32:
define amdgpu_kernel void @v4i8_zext_v4i32_to_v4f32(<4 x float> addrspace(1)* noalias %out, <4 x i8> addrspace(1)* noalias %in) nounwind {
  %load = load <4 x i8>, <4 x i8> addrspace(1)* %in, align 1
  %ext = zext <4 x i8> %load to <4 x i32>
  %cvt = uitofp <4 x i32> %ext to <4 x float>
  store <4 x float> %cvt, <4 x float> addrspace(1)* %out, align 16
  ret void
}
Matt Arsenaulta949dc62016-05-09 16:29:50 +0000199
; (x & 255) -> f32 folds to a single v_cvt_f32_ubyte0 on the loaded dword.
; GCN-LABEL: {{^}}extract_byte0_to_f32:
; GCN: buffer_load_dword [[VAL:v[0-9]+]]
; GCN-NOT: [[VAL]]
; GCN: v_cvt_f32_ubyte0_e32 [[CONV:v[0-9]+]], [[VAL]]
; GCN: buffer_store_dword [[CONV]]
define amdgpu_kernel void @extract_byte0_to_f32(float addrspace(1)* noalias %out, i32 addrspace(1)* noalias %in) nounwind {
  %val = load i32, i32 addrspace(1)* %in
  %and = and i32 %val, 255
  %cvt = uitofp i32 %and to float
  store float %cvt, float addrspace(1)* %out
  ret void
}
212
; ((x >> 8) & 255) -> f32 folds to a single v_cvt_f32_ubyte1.
; GCN-LABEL: {{^}}extract_byte1_to_f32:
; GCN: buffer_load_dword [[VAL:v[0-9]+]]
; GCN-NOT: [[VAL]]
; GCN: v_cvt_f32_ubyte1_e32 [[CONV:v[0-9]+]], [[VAL]]
; GCN: buffer_store_dword [[CONV]]
define amdgpu_kernel void @extract_byte1_to_f32(float addrspace(1)* noalias %out, i32 addrspace(1)* noalias %in) nounwind {
  %val = load i32, i32 addrspace(1)* %in
  %srl = lshr i32 %val, 8
  %and = and i32 %srl, 255
  %cvt = uitofp i32 %and to float
  store float %cvt, float addrspace(1)* %out
  ret void
}
226
; ((x >> 16) & 255) -> f32 folds to a single v_cvt_f32_ubyte2.
; GCN-LABEL: {{^}}extract_byte2_to_f32:
; GCN: buffer_load_dword [[VAL:v[0-9]+]]
; GCN-NOT: [[VAL]]
; GCN: v_cvt_f32_ubyte2_e32 [[CONV:v[0-9]+]], [[VAL]]
; GCN: buffer_store_dword [[CONV]]
define amdgpu_kernel void @extract_byte2_to_f32(float addrspace(1)* noalias %out, i32 addrspace(1)* noalias %in) nounwind {
  %val = load i32, i32 addrspace(1)* %in
  %srl = lshr i32 %val, 16
  %and = and i32 %srl, 255
  %cvt = uitofp i32 %and to float
  store float %cvt, float addrspace(1)* %out
  ret void
}
240
; ((x >> 24) & 255) -> f32 folds to a single v_cvt_f32_ubyte3.
; GCN-LABEL: {{^}}extract_byte3_to_f32:
; GCN: buffer_load_dword [[VAL:v[0-9]+]]
; GCN-NOT: [[VAL]]
; GCN: v_cvt_f32_ubyte3_e32 [[CONV:v[0-9]+]], [[VAL]]
; GCN: buffer_store_dword [[CONV]]
define amdgpu_kernel void @extract_byte3_to_f32(float addrspace(1)* noalias %out, i32 addrspace(1)* noalias %in) nounwind {
  %val = load i32, i32 addrspace(1)* %in
  %srl = lshr i32 %val, 24
  %and = and i32 %srl, 255
  %cvt = uitofp i32 %and to float
  store float %cvt, float addrspace(1)* %out
  ret void
}