blob: b49f507d026b95adc1de576ff9d60c3a72fe6585 [file] [log] [blame]
Simon Pilgrim946f08c2017-05-06 13:46:09 +00001; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
Simon Pilgrim84846982017-08-01 15:14:35 +00002; RUN: llc < %s -mtriple=x86_64-unknown-unknown -print-schedule -mcpu=x86-64 -mattr=+avx2 | FileCheck %s --check-prefix=CHECK --check-prefix=GENERIC
Simon Pilgrim946f08c2017-05-06 13:46:09 +00003; RUN: llc < %s -mtriple=x86_64-unknown-unknown -print-schedule -mcpu=haswell | FileCheck %s --check-prefix=CHECK --check-prefix=HASWELL
Gadi Haber767d98b2017-08-30 08:08:50 +00004; RUN: llc < %s -mtriple=x86_64-unknown-unknown -print-schedule -mcpu=skylake | FileCheck %s --check-prefix=CHECK --check-prefix=SKYLAKE
Simon Pilgrim946f08c2017-05-06 13:46:09 +00005; RUN: llc < %s -mtriple=x86_64-unknown-unknown -print-schedule -mcpu=znver1 | FileCheck %s --check-prefix=CHECK --check-prefix=ZNVER1
6
Simon Pilgrimd2d2b372017-09-12 12:59:20 +00007define <8 x i32> @test_broadcasti128(<8 x i32> %a0, <4 x i32> *%a1) {
8; GENERIC-LABEL: test_broadcasti128:
9; GENERIC: # BB#0:
10; GENERIC-NEXT: vbroadcasti128 {{.*#+}} ymm1 = mem[0,1,0,1] sched: [4:0.50]
11; GENERIC-NEXT: vpaddd %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
12; GENERIC-NEXT: retq # sched: [1:1.00]
13;
14; HASWELL-LABEL: test_broadcasti128:
15; HASWELL: # BB#0:
16; HASWELL-NEXT: vbroadcasti128 {{.*#+}} ymm1 = mem[0,1,0,1] sched: [1:0.50]
17; HASWELL-NEXT: vpaddd %ymm0, %ymm1, %ymm0 # sched: [1:0.50]
18; HASWELL-NEXT: retq # sched: [2:1.00]
19;
20; SKYLAKE-LABEL: test_broadcasti128:
21; SKYLAKE: # BB#0:
22; SKYLAKE-NEXT: vbroadcasti128 {{.*#+}} ymm1 = mem[0,1,0,1] sched: [1:0.50]
23; SKYLAKE-NEXT: vpaddd %ymm0, %ymm1, %ymm0 # sched: [1:0.50]
24; SKYLAKE-NEXT: retq # sched: [2:1.00]
25;
26; ZNVER1-LABEL: test_broadcasti128:
27; ZNVER1: # BB#0:
28; ZNVER1-NEXT: vbroadcasti128 {{.*#+}} ymm1 = mem[0,1,0,1] sched: [8:0.50]
29; ZNVER1-NEXT: vpaddd %ymm0, %ymm1, %ymm0 # sched: [1:0.25]
30; ZNVER1-NEXT: retq # sched: [1:0.50]
31 %1 = load <4 x i32>, <4 x i32> *%a1, align 16
32 %2 = shufflevector <4 x i32> %1, <4 x i32> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3>
33 %3 = add <8 x i32> %2, %a0
34 ret <8 x i32> %3
35}
36
Simon Pilgrim5a931c62017-09-12 11:17:01 +000037define <4 x double> @test_broadcastsd_ymm(<2 x double> %a0) {
38; GENERIC-LABEL: test_broadcastsd_ymm:
39; GENERIC: # BB#0:
40; GENERIC-NEXT: vbroadcastsd %xmm0, %ymm0 # sched: [1:1.00]
41; GENERIC-NEXT: vaddpd %ymm0, %ymm0, %ymm0 # sched: [3:1.00]
42; GENERIC-NEXT: retq # sched: [1:1.00]
43;
44; HASWELL-LABEL: test_broadcastsd_ymm:
45; HASWELL: # BB#0:
46; HASWELL-NEXT: vbroadcastsd %xmm0, %ymm0 # sched: [3:1.00]
47; HASWELL-NEXT: vaddpd %ymm0, %ymm0, %ymm0 # sched: [3:1.00]
48; HASWELL-NEXT: retq # sched: [2:1.00]
49;
50; SKYLAKE-LABEL: test_broadcastsd_ymm:
51; SKYLAKE: # BB#0:
52; SKYLAKE-NEXT: vbroadcastsd %xmm0, %ymm0 # sched: [3:1.00]
53; SKYLAKE-NEXT: vaddpd %ymm0, %ymm0, %ymm0 # sched: [3:1.00]
54; SKYLAKE-NEXT: retq # sched: [2:1.00]
55;
56; ZNVER1-LABEL: test_broadcastsd_ymm:
57; ZNVER1: # BB#0:
58; ZNVER1-NEXT: vbroadcastsd %xmm0, %ymm0 # sched: [100:0.25]
59; ZNVER1-NEXT: vaddpd %ymm0, %ymm0, %ymm0 # sched: [3:1.00]
60; ZNVER1-NEXT: retq # sched: [1:0.50]
61 %1 = shufflevector <2 x double> %a0, <2 x double> undef, <4 x i32> zeroinitializer
62 %2 = fadd <4 x double> %1, %1
63 ret <4 x double> %2
64}
65
66define <4 x float> @test_broadcastss(<4 x float> %a0) {
67; GENERIC-LABEL: test_broadcastss:
68; GENERIC: # BB#0:
69; GENERIC-NEXT: vbroadcastss %xmm0, %xmm0 # sched: [1:1.00]
70; GENERIC-NEXT: vaddps %xmm0, %xmm0, %xmm0 # sched: [3:1.00]
71; GENERIC-NEXT: retq # sched: [1:1.00]
72;
73; HASWELL-LABEL: test_broadcastss:
74; HASWELL: # BB#0:
75; HASWELL-NEXT: vbroadcastss %xmm0, %xmm0 # sched: [1:1.00]
76; HASWELL-NEXT: vaddps %xmm0, %xmm0, %xmm0 # sched: [3:1.00]
77; HASWELL-NEXT: retq # sched: [2:1.00]
78;
79; SKYLAKE-LABEL: test_broadcastss:
80; SKYLAKE: # BB#0:
81; SKYLAKE-NEXT: vbroadcastss %xmm0, %xmm0 # sched: [1:1.00]
82; SKYLAKE-NEXT: vaddps %xmm0, %xmm0, %xmm0 # sched: [3:1.00]
83; SKYLAKE-NEXT: retq # sched: [2:1.00]
84;
85; ZNVER1-LABEL: test_broadcastss:
86; ZNVER1: # BB#0:
87; ZNVER1-NEXT: vbroadcastss %xmm0, %xmm0 # sched: [1:0.50]
88; ZNVER1-NEXT: vaddps %xmm0, %xmm0, %xmm0 # sched: [3:1.00]
89; ZNVER1-NEXT: retq # sched: [1:0.50]
90 %1 = shufflevector <4 x float> %a0, <4 x float> undef, <4 x i32> zeroinitializer
91 %2 = fadd <4 x float> %1, %1
92 ret <4 x float> %2
93}
94
95define <8 x float> @test_broadcastss_ymm(<4 x float> %a0) {
96; GENERIC-LABEL: test_broadcastss_ymm:
97; GENERIC: # BB#0:
98; GENERIC-NEXT: vbroadcastss %xmm0, %ymm0 # sched: [1:1.00]
99; GENERIC-NEXT: vaddps %ymm0, %ymm0, %ymm0 # sched: [3:1.00]
100; GENERIC-NEXT: retq # sched: [1:1.00]
101;
102; HASWELL-LABEL: test_broadcastss_ymm:
103; HASWELL: # BB#0:
104; HASWELL-NEXT: vbroadcastss %xmm0, %ymm0 # sched: [3:1.00]
105; HASWELL-NEXT: vaddps %ymm0, %ymm0, %ymm0 # sched: [3:1.00]
106; HASWELL-NEXT: retq # sched: [2:1.00]
107;
108; SKYLAKE-LABEL: test_broadcastss_ymm:
109; SKYLAKE: # BB#0:
110; SKYLAKE-NEXT: vbroadcastss %xmm0, %ymm0 # sched: [3:1.00]
111; SKYLAKE-NEXT: vaddps %ymm0, %ymm0, %ymm0 # sched: [3:1.00]
112; SKYLAKE-NEXT: retq # sched: [2:1.00]
113;
114; ZNVER1-LABEL: test_broadcastss_ymm:
115; ZNVER1: # BB#0:
116; ZNVER1-NEXT: vbroadcastss %xmm0, %ymm0 # sched: [100:0.25]
117; ZNVER1-NEXT: vaddps %ymm0, %ymm0, %ymm0 # sched: [3:1.00]
118; ZNVER1-NEXT: retq # sched: [1:0.50]
119 %1 = shufflevector <4 x float> %a0, <4 x float> undef, <8 x i32> zeroinitializer
120 %2 = fadd <8 x float> %1, %1
121 ret <8 x float> %2
122}
123
124define <4 x i32> @test_extracti128(<8 x i32> %a0, <8 x i32> %a1, <4 x i32> *%a2) {
125; GENERIC-LABEL: test_extracti128:
126; GENERIC: # BB#0:
127; GENERIC-NEXT: vpaddd %ymm1, %ymm0, %ymm2 # sched: [3:1.00]
128; GENERIC-NEXT: vpsubd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
129; GENERIC-NEXT: vextracti128 $1, %ymm0, %xmm0 # sched: [1:1.00]
130; GENERIC-NEXT: vextracti128 $1, %ymm2, (%rdi) # sched: [1:1.00]
131; GENERIC-NEXT: vzeroupper
132; GENERIC-NEXT: retq # sched: [1:1.00]
133;
134; HASWELL-LABEL: test_extracti128:
135; HASWELL: # BB#0:
136; HASWELL-NEXT: vpaddd %ymm1, %ymm0, %ymm2 # sched: [1:0.50]
137; HASWELL-NEXT: vpsubd %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
138; HASWELL-NEXT: vextracti128 $1, %ymm0, %xmm0 # sched: [3:1.00]
139; HASWELL-NEXT: vextracti128 $1, %ymm2, (%rdi) # sched: [1:1.00]
140; HASWELL-NEXT: vzeroupper # sched: [4:1.00]
141; HASWELL-NEXT: retq # sched: [2:1.00]
142;
143; SKYLAKE-LABEL: test_extracti128:
144; SKYLAKE: # BB#0:
145; SKYLAKE-NEXT: vpaddd %ymm1, %ymm0, %ymm2 # sched: [1:0.50]
146; SKYLAKE-NEXT: vpsubd %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
147; SKYLAKE-NEXT: vextracti128 $1, %ymm0, %xmm0 # sched: [3:1.00]
148; SKYLAKE-NEXT: vextracti128 $1, %ymm2, (%rdi) # sched: [1:1.00]
149; SKYLAKE-NEXT: vzeroupper # sched: [4:1.00]
150; SKYLAKE-NEXT: retq # sched: [2:1.00]
151;
152; ZNVER1-LABEL: test_extracti128:
153; ZNVER1: # BB#0:
154; ZNVER1-NEXT: vpaddd %ymm1, %ymm0, %ymm2 # sched: [1:0.25]
155; ZNVER1-NEXT: vpsubd %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
156; ZNVER1-NEXT: vextracti128 $1, %ymm0, %xmm0 # sched: [2:0.25]
157; ZNVER1-NEXT: vextracti128 $1, %ymm2, (%rdi) # sched: [1:0.50]
158; ZNVER1-NEXT: vzeroupper # sched: [100:?]
159; ZNVER1-NEXT: retq # sched: [1:0.50]
160 %1 = add <8 x i32> %a0, %a1
161 %2 = sub <8 x i32> %a0, %a1
162 %3 = shufflevector <8 x i32> %1, <8 x i32> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
163 %4 = shufflevector <8 x i32> %2, <8 x i32> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
164 store <4 x i32> %3, <4 x i32> *%a2
165 ret <4 x i32> %4
166}
167
Simon Pilgrim76418aa2017-09-12 15:52:01 +0000168define <2 x double> @test_gatherdpd(<2 x double> %a0, i8* %a1, <4 x i32> %a2, <2 x double> %a3) {
169; GENERIC-LABEL: test_gatherdpd:
170; GENERIC: # BB#0:
171; GENERIC-NEXT: vgatherdpd %xmm2, (%rdi,%xmm1,2), %xmm0
172; GENERIC-NEXT: retq # sched: [1:1.00]
173;
174; HASWELL-LABEL: test_gatherdpd:
175; HASWELL: # BB#0:
176; HASWELL-NEXT: vgatherdpd %xmm2, (%rdi,%xmm1,2), %xmm0 # sched: [1:?]
177; HASWELL-NEXT: retq # sched: [2:1.00]
178;
179; SKYLAKE-LABEL: test_gatherdpd:
180; SKYLAKE: # BB#0:
181; SKYLAKE-NEXT: vgatherdpd %xmm2, (%rdi,%xmm1,2), %xmm0 # sched: [1:?]
182; SKYLAKE-NEXT: retq # sched: [2:1.00]
183;
184; ZNVER1-LABEL: test_gatherdpd:
185; ZNVER1: # BB#0:
186; ZNVER1-NEXT: vgatherdpd %xmm2, (%rdi,%xmm1,2), %xmm0 # sched: [100:?]
187; ZNVER1-NEXT: retq # sched: [1:0.50]
188 %1 = call <2 x double> @llvm.x86.avx2.gather.d.pd(<2 x double> %a0, i8* %a1, <4 x i32> %a2, <2 x double> %a3, i8 2)
189 ret <2 x double> %1
190}
191declare <2 x double> @llvm.x86.avx2.gather.d.pd(<2 x double>, i8*, <4 x i32>, <2 x double>, i8) nounwind readonly
192
193define <4 x double> @test_gatherdpd_ymm(<4 x double> %a0, i8* %a1, <4 x i32> %a2, <4 x double> %a3) {
194; GENERIC-LABEL: test_gatherdpd_ymm:
195; GENERIC: # BB#0:
196; GENERIC-NEXT: vgatherdpd %ymm2, (%rdi,%xmm1,8), %ymm0
197; GENERIC-NEXT: retq # sched: [1:1.00]
198;
199; HASWELL-LABEL: test_gatherdpd_ymm:
200; HASWELL: # BB#0:
201; HASWELL-NEXT: vgatherdpd %ymm2, (%rdi,%xmm1,8), %ymm0 # sched: [1:?]
202; HASWELL-NEXT: retq # sched: [2:1.00]
203;
204; SKYLAKE-LABEL: test_gatherdpd_ymm:
205; SKYLAKE: # BB#0:
206; SKYLAKE-NEXT: vgatherdpd %ymm2, (%rdi,%xmm1,8), %ymm0 # sched: [1:?]
207; SKYLAKE-NEXT: retq # sched: [2:1.00]
208;
209; ZNVER1-LABEL: test_gatherdpd_ymm:
210; ZNVER1: # BB#0:
211; ZNVER1-NEXT: vgatherdpd %ymm2, (%rdi,%xmm1,8), %ymm0 # sched: [100:?]
212; ZNVER1-NEXT: retq # sched: [1:0.50]
213 %1 = call <4 x double> @llvm.x86.avx2.gather.d.pd.256(<4 x double> %a0, i8* %a1, <4 x i32> %a2, <4 x double> %a3, i8 8)
214 ret <4 x double> %1
215}
216declare <4 x double> @llvm.x86.avx2.gather.d.pd.256(<4 x double>, i8*, <4 x i32>, <4 x double>, i8) nounwind readonly
217
218define <4 x float> @test_gatherdps(<4 x float> %a0, i8* %a1, <4 x i32> %a2, <4 x float> %a3) {
219; GENERIC-LABEL: test_gatherdps:
220; GENERIC: # BB#0:
221; GENERIC-NEXT: vgatherdps %xmm2, (%rdi,%xmm1,2), %xmm0
222; GENERIC-NEXT: retq # sched: [1:1.00]
223;
224; HASWELL-LABEL: test_gatherdps:
225; HASWELL: # BB#0:
226; HASWELL-NEXT: vgatherdps %xmm2, (%rdi,%xmm1,2), %xmm0 # sched: [1:?]
227; HASWELL-NEXT: retq # sched: [2:1.00]
228;
229; SKYLAKE-LABEL: test_gatherdps:
230; SKYLAKE: # BB#0:
231; SKYLAKE-NEXT: vgatherdps %xmm2, (%rdi,%xmm1,2), %xmm0 # sched: [1:?]
232; SKYLAKE-NEXT: retq # sched: [2:1.00]
233;
234; ZNVER1-LABEL: test_gatherdps:
235; ZNVER1: # BB#0:
236; ZNVER1-NEXT: vgatherdps %xmm2, (%rdi,%xmm1,2), %xmm0 # sched: [100:?]
237; ZNVER1-NEXT: retq # sched: [1:0.50]
238 %1 = call <4 x float> @llvm.x86.avx2.gather.d.ps(<4 x float> %a0, i8* %a1, <4 x i32> %a2, <4 x float> %a3, i8 2)
239 ret <4 x float> %1
240}
241declare <4 x float> @llvm.x86.avx2.gather.d.ps(<4 x float>, i8*, <4 x i32>, <4 x float>, i8) nounwind readonly
242
243define <8 x float> @test_gatherdps_ymm(<8 x float> %a0, i8* %a1, <8 x i32> %a2, <8 x float> %a3) {
244; GENERIC-LABEL: test_gatherdps_ymm:
245; GENERIC: # BB#0:
246; GENERIC-NEXT: vgatherdps %ymm2, (%rdi,%ymm1,4), %ymm0
247; GENERIC-NEXT: retq # sched: [1:1.00]
248;
249; HASWELL-LABEL: test_gatherdps_ymm:
250; HASWELL: # BB#0:
251; HASWELL-NEXT: vgatherdps %ymm2, (%rdi,%ymm1,4), %ymm0 # sched: [1:?]
252; HASWELL-NEXT: retq # sched: [2:1.00]
253;
254; SKYLAKE-LABEL: test_gatherdps_ymm:
255; SKYLAKE: # BB#0:
256; SKYLAKE-NEXT: vgatherdps %ymm2, (%rdi,%ymm1,4), %ymm0 # sched: [1:?]
257; SKYLAKE-NEXT: retq # sched: [2:1.00]
258;
259; ZNVER1-LABEL: test_gatherdps_ymm:
260; ZNVER1: # BB#0:
261; ZNVER1-NEXT: vgatherdps %ymm2, (%rdi,%ymm1,4), %ymm0 # sched: [100:?]
262; ZNVER1-NEXT: retq # sched: [1:0.50]
263 %1 = call <8 x float> @llvm.x86.avx2.gather.d.ps.256(<8 x float> %a0, i8* %a1, <8 x i32> %a2, <8 x float> %a3, i8 4)
264 ret <8 x float> %1
265}
266declare <8 x float> @llvm.x86.avx2.gather.d.ps.256(<8 x float>, i8*, <8 x i32>, <8 x float>, i8) nounwind readonly
267
268define <2 x double> @test_gatherqpd(<2 x double> %a0, i8* %a1, <2 x i64> %a2, <2 x double> %a3) {
269; GENERIC-LABEL: test_gatherqpd:
270; GENERIC: # BB#0:
271; GENERIC-NEXT: vgatherqpd %xmm2, (%rdi,%xmm1,2), %xmm0
272; GENERIC-NEXT: retq # sched: [1:1.00]
273;
274; HASWELL-LABEL: test_gatherqpd:
275; HASWELL: # BB#0:
276; HASWELL-NEXT: vgatherqpd %xmm2, (%rdi,%xmm1,2), %xmm0 # sched: [1:?]
277; HASWELL-NEXT: retq # sched: [2:1.00]
278;
279; SKYLAKE-LABEL: test_gatherqpd:
280; SKYLAKE: # BB#0:
281; SKYLAKE-NEXT: vgatherqpd %xmm2, (%rdi,%xmm1,2), %xmm0 # sched: [1:?]
282; SKYLAKE-NEXT: retq # sched: [2:1.00]
283;
284; ZNVER1-LABEL: test_gatherqpd:
285; ZNVER1: # BB#0:
286; ZNVER1-NEXT: vgatherqpd %xmm2, (%rdi,%xmm1,2), %xmm0 # sched: [100:?]
287; ZNVER1-NEXT: retq # sched: [1:0.50]
288 %1 = call <2 x double> @llvm.x86.avx2.gather.q.pd(<2 x double> %a0, i8* %a1, <2 x i64> %a2, <2 x double> %a3, i8 2)
289 ret <2 x double> %1
290}
291declare <2 x double> @llvm.x86.avx2.gather.q.pd(<2 x double>, i8*, <2 x i64>, <2 x double>, i8) nounwind readonly
292
293define <4 x double> @test_gatherqpd_ymm(<4 x double> %a0, i8* %a1, <4 x i64> %a2, <4 x double> %a3) {
294; GENERIC-LABEL: test_gatherqpd_ymm:
295; GENERIC: # BB#0:
296; GENERIC-NEXT: vgatherqpd %ymm2, (%rdi,%ymm1,8), %ymm0
297; GENERIC-NEXT: retq # sched: [1:1.00]
298;
299; HASWELL-LABEL: test_gatherqpd_ymm:
300; HASWELL: # BB#0:
301; HASWELL-NEXT: vgatherqpd %ymm2, (%rdi,%ymm1,8), %ymm0 # sched: [1:?]
302; HASWELL-NEXT: retq # sched: [2:1.00]
303;
304; SKYLAKE-LABEL: test_gatherqpd_ymm:
305; SKYLAKE: # BB#0:
306; SKYLAKE-NEXT: vgatherqpd %ymm2, (%rdi,%ymm1,8), %ymm0 # sched: [1:?]
307; SKYLAKE-NEXT: retq # sched: [2:1.00]
308;
309; ZNVER1-LABEL: test_gatherqpd_ymm:
310; ZNVER1: # BB#0:
311; ZNVER1-NEXT: vgatherqpd %ymm2, (%rdi,%ymm1,8), %ymm0 # sched: [100:?]
312; ZNVER1-NEXT: retq # sched: [1:0.50]
313 %1 = call <4 x double> @llvm.x86.avx2.gather.q.pd.256(<4 x double> %a0, i8* %a1, <4 x i64> %a2, <4 x double> %a3, i8 8)
314 ret <4 x double> %1
315}
316declare <4 x double> @llvm.x86.avx2.gather.q.pd.256(<4 x double>, i8*, <4 x i64>, <4 x double>, i8) nounwind readonly
317
318define <4 x float> @test_gatherqps(<4 x float> %a0, i8* %a1, <2 x i64> %a2, <4 x float> %a3) {
319; GENERIC-LABEL: test_gatherqps:
320; GENERIC: # BB#0:
321; GENERIC-NEXT: vgatherqps %xmm2, (%rdi,%xmm1,2), %xmm0
322; GENERIC-NEXT: retq # sched: [1:1.00]
323;
324; HASWELL-LABEL: test_gatherqps:
325; HASWELL: # BB#0:
326; HASWELL-NEXT: vgatherqps %xmm2, (%rdi,%xmm1,2), %xmm0 # sched: [1:?]
327; HASWELL-NEXT: retq # sched: [2:1.00]
328;
329; SKYLAKE-LABEL: test_gatherqps:
330; SKYLAKE: # BB#0:
331; SKYLAKE-NEXT: vgatherqps %xmm2, (%rdi,%xmm1,2), %xmm0 # sched: [1:?]
332; SKYLAKE-NEXT: retq # sched: [2:1.00]
333;
334; ZNVER1-LABEL: test_gatherqps:
335; ZNVER1: # BB#0:
336; ZNVER1-NEXT: vgatherqps %xmm2, (%rdi,%xmm1,2), %xmm0 # sched: [100:?]
337; ZNVER1-NEXT: retq # sched: [1:0.50]
338 %1 = call <4 x float> @llvm.x86.avx2.gather.q.ps(<4 x float> %a0, i8* %a1, <2 x i64> %a2, <4 x float> %a3, i8 2)
339 ret <4 x float> %1
340}
341declare <4 x float> @llvm.x86.avx2.gather.q.ps(<4 x float>, i8*, <2 x i64>, <4 x float>, i8) nounwind readonly
342
343define <4 x float> @test_gatherqps_ymm(<4 x float> %a0, i8* %a1, <4 x i64> %a2, <4 x float> %a3) {
344; GENERIC-LABEL: test_gatherqps_ymm:
345; GENERIC: # BB#0:
346; GENERIC-NEXT: vgatherqps %xmm2, (%rdi,%ymm1,4), %xmm0
347; GENERIC-NEXT: vzeroupper
348; GENERIC-NEXT: retq # sched: [1:1.00]
349;
350; HASWELL-LABEL: test_gatherqps_ymm:
351; HASWELL: # BB#0:
352; HASWELL-NEXT: vgatherqps %xmm2, (%rdi,%ymm1,4), %xmm0 # sched: [1:?]
353; HASWELL-NEXT: vzeroupper # sched: [4:1.00]
354; HASWELL-NEXT: retq # sched: [2:1.00]
355;
356; SKYLAKE-LABEL: test_gatherqps_ymm:
357; SKYLAKE: # BB#0:
358; SKYLAKE-NEXT: vgatherqps %xmm2, (%rdi,%ymm1,4), %xmm0 # sched: [1:?]
359; SKYLAKE-NEXT: vzeroupper # sched: [4:1.00]
360; SKYLAKE-NEXT: retq # sched: [2:1.00]
361;
362; ZNVER1-LABEL: test_gatherqps_ymm:
363; ZNVER1: # BB#0:
364; ZNVER1-NEXT: vgatherqps %xmm2, (%rdi,%ymm1,4), %xmm0 # sched: [100:?]
365; ZNVER1-NEXT: vzeroupper # sched: [100:?]
366; ZNVER1-NEXT: retq # sched: [1:0.50]
367 %1 = call <4 x float> @llvm.x86.avx2.gather.q.ps.256(<4 x float> %a0, i8* %a1, <4 x i64> %a2, <4 x float> %a3, i8 4)
368 ret <4 x float> %1
369}
370declare <4 x float> @llvm.x86.avx2.gather.q.ps.256(<4 x float>, i8*, <4 x i64>, <4 x float>, i8) nounwind readonly
371
Simon Pilgrim5a931c62017-09-12 11:17:01 +0000372define <8 x i32> @test_inserti128(<8 x i32> %a0, <4 x i32> %a1, <4 x i32> *%a2) {
373; GENERIC-LABEL: test_inserti128:
374; GENERIC: # BB#0:
375; GENERIC-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1 # sched: [1:1.00]
376; GENERIC-NEXT: vinserti128 $1, (%rdi), %ymm0, %ymm0 # sched: [5:1.00]
377; GENERIC-NEXT: vpaddd %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
378; GENERIC-NEXT: retq # sched: [1:1.00]
379;
380; HASWELL-LABEL: test_inserti128:
381; HASWELL: # BB#0:
382; HASWELL-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1 # sched: [3:1.00]
383; HASWELL-NEXT: vinserti128 $1, (%rdi), %ymm0, %ymm0 # sched: [1:0.50]
384; HASWELL-NEXT: vpaddd %ymm0, %ymm1, %ymm0 # sched: [1:0.50]
385; HASWELL-NEXT: retq # sched: [2:1.00]
386;
387; SKYLAKE-LABEL: test_inserti128:
388; SKYLAKE: # BB#0:
389; SKYLAKE-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1 # sched: [3:1.00]
390; SKYLAKE-NEXT: vinserti128 $1, (%rdi), %ymm0, %ymm0 # sched: [1:0.50]
391; SKYLAKE-NEXT: vpaddd %ymm0, %ymm1, %ymm0 # sched: [1:0.50]
392; SKYLAKE-NEXT: retq # sched: [2:1.00]
393;
394; ZNVER1-LABEL: test_inserti128:
395; ZNVER1: # BB#0:
396; ZNVER1-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1 # sched: [2:0.25]
397; ZNVER1-NEXT: vinserti128 $1, (%rdi), %ymm0, %ymm0 # sched: [9:0.50]
398; ZNVER1-NEXT: vpaddd %ymm0, %ymm1, %ymm0 # sched: [1:0.25]
399; ZNVER1-NEXT: retq # sched: [1:0.50]
400 %1 = shufflevector <4 x i32> %a1, <4 x i32> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef>
401 %2 = shufflevector <8 x i32> %a0, <8 x i32> %1, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 8, i32 9, i32 10, i32 11>
402 %3 = load <4 x i32>, <4 x i32> *%a2, align 16
403 %4 = shufflevector <4 x i32> %3, <4 x i32> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef>
404 %5 = shufflevector <8 x i32> %a0, <8 x i32> %4, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 8, i32 9, i32 10, i32 11>
405 %6 = add <8 x i32> %2, %5
406 ret <8 x i32> %6
407}
408
Simon Pilgrim76418aa2017-09-12 15:52:01 +0000409define <4 x i64> @test_movntdqa(i8* %a0) {
410; GENERIC-LABEL: test_movntdqa:
411; GENERIC: # BB#0:
412; GENERIC-NEXT: vmovntdqa (%rdi), %ymm0 # sched: [4:0.50]
413; GENERIC-NEXT: retq # sched: [1:1.00]
414;
415; HASWELL-LABEL: test_movntdqa:
416; HASWELL: # BB#0:
417; HASWELL-NEXT: vmovntdqa (%rdi), %ymm0 # sched: [1:0.50]
418; HASWELL-NEXT: retq # sched: [2:1.00]
419;
420; SKYLAKE-LABEL: test_movntdqa:
421; SKYLAKE: # BB#0:
422; SKYLAKE-NEXT: vmovntdqa (%rdi), %ymm0 # sched: [1:0.50]
423; SKYLAKE-NEXT: retq # sched: [2:1.00]
424;
425; ZNVER1-LABEL: test_movntdqa:
426; ZNVER1: # BB#0:
427; ZNVER1-NEXT: vmovntdqa (%rdi), %ymm0 # sched: [8:0.50]
428; ZNVER1-NEXT: retq # sched: [1:0.50]
429 %1 = call <4 x i64> @llvm.x86.avx2.movntdqa(i8* %a0)
430 ret <4 x i64> %1
431}
432declare <4 x i64> @llvm.x86.avx2.movntdqa(i8*) nounwind readonly
433
Simon Pilgrim0af5a7722017-09-12 15:01:20 +0000434define <16 x i16> @test_mpsadbw(<32 x i8> %a0, <32 x i8> %a1, <32 x i8> *%a2) {
435; GENERIC-LABEL: test_mpsadbw:
436; GENERIC: # BB#0:
437; GENERIC-NEXT: vmpsadbw $7, %ymm1, %ymm0, %ymm0 # sched: [5:1.00]
438; GENERIC-NEXT: vmpsadbw $7, (%rdi), %ymm0, %ymm0 # sched: [11:1.00]
439; GENERIC-NEXT: retq # sched: [1:1.00]
440;
441; HASWELL-LABEL: test_mpsadbw:
442; HASWELL: # BB#0:
443; HASWELL-NEXT: vmpsadbw $7, %ymm1, %ymm0, %ymm0 # sched: [7:2.00]
444; HASWELL-NEXT: vmpsadbw $7, (%rdi), %ymm0, %ymm0 # sched: [7:2.00]
445; HASWELL-NEXT: retq # sched: [2:1.00]
446;
447; SKYLAKE-LABEL: test_mpsadbw:
448; SKYLAKE: # BB#0:
449; SKYLAKE-NEXT: vmpsadbw $7, %ymm1, %ymm0, %ymm0 # sched: [7:2.00]
450; SKYLAKE-NEXT: vmpsadbw $7, (%rdi), %ymm0, %ymm0 # sched: [7:2.00]
451; SKYLAKE-NEXT: retq # sched: [2:1.00]
452;
453; ZNVER1-LABEL: test_mpsadbw:
454; ZNVER1: # BB#0:
455; ZNVER1-NEXT: vmpsadbw $7, %ymm1, %ymm0, %ymm0 # sched: [100:?]
456; ZNVER1-NEXT: vmpsadbw $7, (%rdi), %ymm0, %ymm0 # sched: [100:?]
457; ZNVER1-NEXT: retq # sched: [1:0.50]
458 %1 = call <16 x i16> @llvm.x86.avx2.mpsadbw(<32 x i8> %a0, <32 x i8> %a1, i8 7)
459 %2 = bitcast <16 x i16> %1 to <32 x i8>
460 %3 = load <32 x i8>, <32 x i8> *%a2, align 32
461 %4 = call <16 x i16> @llvm.x86.avx2.mpsadbw(<32 x i8> %2, <32 x i8> %3, i8 7)
462 ret <16 x i16> %4
463}
464declare <16 x i16> @llvm.x86.avx2.mpsadbw(<32 x i8>, <32 x i8>, i8) nounwind readnone
465
Simon Pilgrim946f08c2017-05-06 13:46:09 +0000466define <32 x i8> @test_pabsb(<32 x i8> %a0, <32 x i8> *%a1) {
Simon Pilgrim84846982017-08-01 15:14:35 +0000467; GENERIC-LABEL: test_pabsb:
468; GENERIC: # BB#0:
469; GENERIC-NEXT: vpabsb %ymm0, %ymm0 # sched: [3:1.00]
470; GENERIC-NEXT: vpabsb (%rdi), %ymm1 # sched: [7:1.00]
471; GENERIC-NEXT: vpor %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
472; GENERIC-NEXT: retq # sched: [1:1.00]
473;
Simon Pilgrim946f08c2017-05-06 13:46:09 +0000474; HASWELL-LABEL: test_pabsb:
475; HASWELL: # BB#0:
476; HASWELL-NEXT: vpabsb %ymm0, %ymm0 # sched: [1:0.50]
Gadi Haberd76f7b82017-08-28 10:04:16 +0000477; HASWELL-NEXT: vpabsb (%rdi), %ymm1 # sched: [1:0.50]
Simon Pilgrim946f08c2017-05-06 13:46:09 +0000478; HASWELL-NEXT: vpor %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
Gadi Haberd76f7b82017-08-28 10:04:16 +0000479; HASWELL-NEXT: retq # sched: [2:1.00]
Simon Pilgrim946f08c2017-05-06 13:46:09 +0000480;
Gadi Haber767d98b2017-08-30 08:08:50 +0000481; SKYLAKE-LABEL: test_pabsb:
482; SKYLAKE: # BB#0:
483; SKYLAKE-NEXT: vpabsb %ymm0, %ymm0 # sched: [1:0.50]
484; SKYLAKE-NEXT: vpabsb (%rdi), %ymm1 # sched: [1:0.50]
485; SKYLAKE-NEXT: vpor %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
486; SKYLAKE-NEXT: retq # sched: [2:1.00]
487;
Simon Pilgrim946f08c2017-05-06 13:46:09 +0000488; ZNVER1-LABEL: test_pabsb:
489; ZNVER1: # BB#0:
Craig Topper106b5b62017-07-19 02:45:14 +0000490; ZNVER1-NEXT: vpabsb (%rdi), %ymm1 # sched: [8:0.50]
491; ZNVER1-NEXT: vpabsb %ymm0, %ymm0 # sched: [1:0.25]
492; ZNVER1-NEXT: vpor %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
Ashutosh Nemabfcac0b2017-08-31 12:38:35 +0000493; ZNVER1-NEXT: retq # sched: [1:0.50]
Simon Pilgrim946f08c2017-05-06 13:46:09 +0000494 %1 = call <32 x i8> @llvm.x86.avx2.pabs.b(<32 x i8> %a0)
495 %2 = load <32 x i8>, <32 x i8> *%a1, align 32
496 %3 = call <32 x i8> @llvm.x86.avx2.pabs.b(<32 x i8> %2)
497 %4 = or <32 x i8> %1, %3
498 ret <32 x i8> %4
499}
500declare <32 x i8> @llvm.x86.avx2.pabs.b(<32 x i8>) nounwind readnone
501
502define <8 x i32> @test_pabsd(<8 x i32> %a0, <8 x i32> *%a1) {
Simon Pilgrim84846982017-08-01 15:14:35 +0000503; GENERIC-LABEL: test_pabsd:
504; GENERIC: # BB#0:
505; GENERIC-NEXT: vpabsd %ymm0, %ymm0 # sched: [3:1.00]
506; GENERIC-NEXT: vpabsd (%rdi), %ymm1 # sched: [7:1.00]
507; GENERIC-NEXT: vpor %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
508; GENERIC-NEXT: retq # sched: [1:1.00]
509;
Simon Pilgrim946f08c2017-05-06 13:46:09 +0000510; HASWELL-LABEL: test_pabsd:
511; HASWELL: # BB#0:
512; HASWELL-NEXT: vpabsd %ymm0, %ymm0 # sched: [1:0.50]
Gadi Haberd76f7b82017-08-28 10:04:16 +0000513; HASWELL-NEXT: vpabsd (%rdi), %ymm1 # sched: [1:0.50]
Simon Pilgrim946f08c2017-05-06 13:46:09 +0000514; HASWELL-NEXT: vpor %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
Gadi Haberd76f7b82017-08-28 10:04:16 +0000515; HASWELL-NEXT: retq # sched: [2:1.00]
Simon Pilgrim946f08c2017-05-06 13:46:09 +0000516;
Gadi Haber767d98b2017-08-30 08:08:50 +0000517; SKYLAKE-LABEL: test_pabsd:
518; SKYLAKE: # BB#0:
519; SKYLAKE-NEXT: vpabsd %ymm0, %ymm0 # sched: [1:0.50]
520; SKYLAKE-NEXT: vpabsd (%rdi), %ymm1 # sched: [1:0.50]
521; SKYLAKE-NEXT: vpor %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
522; SKYLAKE-NEXT: retq # sched: [2:1.00]
523;
Simon Pilgrim946f08c2017-05-06 13:46:09 +0000524; ZNVER1-LABEL: test_pabsd:
525; ZNVER1: # BB#0:
Craig Topper106b5b62017-07-19 02:45:14 +0000526; ZNVER1-NEXT: vpabsd (%rdi), %ymm1 # sched: [8:0.50]
527; ZNVER1-NEXT: vpabsd %ymm0, %ymm0 # sched: [1:0.25]
528; ZNVER1-NEXT: vpor %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
Ashutosh Nemabfcac0b2017-08-31 12:38:35 +0000529; ZNVER1-NEXT: retq # sched: [1:0.50]
Simon Pilgrim946f08c2017-05-06 13:46:09 +0000530 %1 = call <8 x i32> @llvm.x86.avx2.pabs.d(<8 x i32> %a0)
531 %2 = load <8 x i32>, <8 x i32> *%a1, align 32
532 %3 = call <8 x i32> @llvm.x86.avx2.pabs.d(<8 x i32> %2)
533 %4 = or <8 x i32> %1, %3
534 ret <8 x i32> %4
535}
536declare <8 x i32> @llvm.x86.avx2.pabs.d(<8 x i32>) nounwind readnone
537
538define <16 x i16> @test_pabsw(<16 x i16> %a0, <16 x i16> *%a1) {
Simon Pilgrim84846982017-08-01 15:14:35 +0000539; GENERIC-LABEL: test_pabsw:
540; GENERIC: # BB#0:
541; GENERIC-NEXT: vpabsw %ymm0, %ymm0 # sched: [3:1.00]
542; GENERIC-NEXT: vpabsw (%rdi), %ymm1 # sched: [7:1.00]
543; GENERIC-NEXT: vpor %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
544; GENERIC-NEXT: retq # sched: [1:1.00]
545;
Simon Pilgrim946f08c2017-05-06 13:46:09 +0000546; HASWELL-LABEL: test_pabsw:
547; HASWELL: # BB#0:
548; HASWELL-NEXT: vpabsw %ymm0, %ymm0 # sched: [1:0.50]
Gadi Haberd76f7b82017-08-28 10:04:16 +0000549; HASWELL-NEXT: vpabsw (%rdi), %ymm1 # sched: [1:0.50]
Simon Pilgrim946f08c2017-05-06 13:46:09 +0000550; HASWELL-NEXT: vpor %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
Gadi Haberd76f7b82017-08-28 10:04:16 +0000551; HASWELL-NEXT: retq # sched: [2:1.00]
Simon Pilgrim946f08c2017-05-06 13:46:09 +0000552;
Gadi Haber767d98b2017-08-30 08:08:50 +0000553; SKYLAKE-LABEL: test_pabsw:
554; SKYLAKE: # BB#0:
555; SKYLAKE-NEXT: vpabsw %ymm0, %ymm0 # sched: [1:0.50]
556; SKYLAKE-NEXT: vpabsw (%rdi), %ymm1 # sched: [1:0.50]
557; SKYLAKE-NEXT: vpor %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
558; SKYLAKE-NEXT: retq # sched: [2:1.00]
559;
Simon Pilgrim946f08c2017-05-06 13:46:09 +0000560; ZNVER1-LABEL: test_pabsw:
561; ZNVER1: # BB#0:
Craig Topper106b5b62017-07-19 02:45:14 +0000562; ZNVER1-NEXT: vpabsw (%rdi), %ymm1 # sched: [8:0.50]
563; ZNVER1-NEXT: vpabsw %ymm0, %ymm0 # sched: [1:0.25]
564; ZNVER1-NEXT: vpor %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
Ashutosh Nemabfcac0b2017-08-31 12:38:35 +0000565; ZNVER1-NEXT: retq # sched: [1:0.50]
Simon Pilgrim946f08c2017-05-06 13:46:09 +0000566 %1 = call <16 x i16> @llvm.x86.avx2.pabs.w(<16 x i16> %a0)
567 %2 = load <16 x i16>, <16 x i16> *%a1, align 32
568 %3 = call <16 x i16> @llvm.x86.avx2.pabs.w(<16 x i16> %2)
569 %4 = or <16 x i16> %1, %3
570 ret <16 x i16> %4
571}
572declare <16 x i16> @llvm.x86.avx2.pabs.w(<16 x i16>) nounwind readnone
573
Simon Pilgrim0af5a7722017-09-12 15:01:20 +0000574define <16 x i16> @test_packssdw(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> *%a2) {
575; GENERIC-LABEL: test_packssdw:
576; GENERIC: # BB#0:
577; GENERIC-NEXT: vpackssdw %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
578; GENERIC-NEXT: vpackssdw (%rdi), %ymm0, %ymm0 # sched: [5:1.00]
579; GENERIC-NEXT: retq # sched: [1:1.00]
580;
581; HASWELL-LABEL: test_packssdw:
582; HASWELL: # BB#0:
583; HASWELL-NEXT: vpackssdw %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
584; HASWELL-NEXT: vpackssdw (%rdi), %ymm0, %ymm0 # sched: [1:1.00]
585; HASWELL-NEXT: retq # sched: [2:1.00]
586;
587; SKYLAKE-LABEL: test_packssdw:
588; SKYLAKE: # BB#0:
589; SKYLAKE-NEXT: vpackssdw %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
590; SKYLAKE-NEXT: vpackssdw (%rdi), %ymm0, %ymm0 # sched: [1:1.00]
591; SKYLAKE-NEXT: retq # sched: [2:1.00]
592;
593; ZNVER1-LABEL: test_packssdw:
594; ZNVER1: # BB#0:
595; ZNVER1-NEXT: vpackssdw %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
596; ZNVER1-NEXT: vpackssdw (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
597; ZNVER1-NEXT: retq # sched: [1:0.50]
598 %1 = call <16 x i16> @llvm.x86.avx2.packssdw(<8 x i32> %a0, <8 x i32> %a1)
599 %2 = bitcast <16 x i16> %1 to <8 x i32>
600 %3 = load <8 x i32>, <8 x i32> *%a2, align 32
601 %4 = call <16 x i16> @llvm.x86.avx2.packssdw(<8 x i32> %2, <8 x i32> %3)
602 ret <16 x i16> %4
603}
604declare <16 x i16> @llvm.x86.avx2.packssdw(<8 x i32>, <8 x i32>) nounwind readnone
605
606define <32 x i8> @test_packsswb(<16 x i16> %a0, <16 x i16> %a1, <16 x i16> *%a2) {
607; GENERIC-LABEL: test_packsswb:
608; GENERIC: # BB#0:
609; GENERIC-NEXT: vpacksswb %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
610; GENERIC-NEXT: vpacksswb (%rdi), %ymm0, %ymm0 # sched: [5:1.00]
611; GENERIC-NEXT: retq # sched: [1:1.00]
612;
613; HASWELL-LABEL: test_packsswb:
614; HASWELL: # BB#0:
615; HASWELL-NEXT: vpacksswb %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
616; HASWELL-NEXT: vpacksswb (%rdi), %ymm0, %ymm0 # sched: [1:1.00]
617; HASWELL-NEXT: retq # sched: [2:1.00]
618;
619; SKYLAKE-LABEL: test_packsswb:
620; SKYLAKE: # BB#0:
621; SKYLAKE-NEXT: vpacksswb %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
622; SKYLAKE-NEXT: vpacksswb (%rdi), %ymm0, %ymm0 # sched: [1:1.00]
623; SKYLAKE-NEXT: retq # sched: [2:1.00]
624;
625; ZNVER1-LABEL: test_packsswb:
626; ZNVER1: # BB#0:
627; ZNVER1-NEXT: vpacksswb %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
628; ZNVER1-NEXT: vpacksswb (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
629; ZNVER1-NEXT: retq # sched: [1:0.50]
630 %1 = call <32 x i8> @llvm.x86.avx2.packsswb(<16 x i16> %a0, <16 x i16> %a1)
631 %2 = bitcast <32 x i8> %1 to <16 x i16>
632 %3 = load <16 x i16>, <16 x i16> *%a2, align 32
633 %4 = call <32 x i8> @llvm.x86.avx2.packsswb(<16 x i16> %2, <16 x i16> %3)
634 ret <32 x i8> %4
635}
636declare <32 x i8> @llvm.x86.avx2.packsswb(<16 x i16>, <16 x i16>) nounwind readnone
637
638define <16 x i16> @test_packusdw(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> *%a2) {
639; GENERIC-LABEL: test_packusdw:
640; GENERIC: # BB#0:
641; GENERIC-NEXT: vpackusdw %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
642; GENERIC-NEXT: vpackusdw (%rdi), %ymm0, %ymm0 # sched: [5:1.00]
643; GENERIC-NEXT: retq # sched: [1:1.00]
644;
645; HASWELL-LABEL: test_packusdw:
646; HASWELL: # BB#0:
647; HASWELL-NEXT: vpackusdw %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
648; HASWELL-NEXT: vpackusdw (%rdi), %ymm0, %ymm0 # sched: [1:1.00]
649; HASWELL-NEXT: retq # sched: [2:1.00]
650;
651; SKYLAKE-LABEL: test_packusdw:
652; SKYLAKE: # BB#0:
653; SKYLAKE-NEXT: vpackusdw %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
654; SKYLAKE-NEXT: vpackusdw (%rdi), %ymm0, %ymm0 # sched: [1:1.00]
655; SKYLAKE-NEXT: retq # sched: [2:1.00]
656;
657; ZNVER1-LABEL: test_packusdw:
658; ZNVER1: # BB#0:
659; ZNVER1-NEXT: vpackusdw %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
660; ZNVER1-NEXT: vpackusdw (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
661; ZNVER1-NEXT: retq # sched: [1:0.50]
662 %1 = call <16 x i16> @llvm.x86.avx2.packusdw(<8 x i32> %a0, <8 x i32> %a1)
663 %2 = bitcast <16 x i16> %1 to <8 x i32>
664 %3 = load <8 x i32>, <8 x i32> *%a2, align 32
665 %4 = call <16 x i16> @llvm.x86.avx2.packusdw(<8 x i32> %2, <8 x i32> %3)
666 ret <16 x i16> %4
667}
668declare <16 x i16> @llvm.x86.avx2.packusdw(<8 x i32>, <8 x i32>) nounwind readnone
669
; Check per-CPU latency/throughput sched comments for VPACKUSWB
; (register form and memory-folded form).
define <32 x i8> @test_packuswb(<16 x i16> %a0, <16 x i16> %a1, <16 x i16> *%a2) {
; GENERIC-LABEL: test_packuswb:
; GENERIC:       # BB#0:
; GENERIC-NEXT:    vpackuswb %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
; GENERIC-NEXT:    vpackuswb (%rdi), %ymm0, %ymm0 # sched: [5:1.00]
; GENERIC-NEXT:    retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_packuswb:
; HASWELL:       # BB#0:
; HASWELL-NEXT:    vpackuswb %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
; HASWELL-NEXT:    vpackuswb (%rdi), %ymm0, %ymm0 # sched: [1:1.00]
; HASWELL-NEXT:    retq # sched: [2:1.00]
;
; SKYLAKE-LABEL: test_packuswb:
; SKYLAKE:       # BB#0:
; SKYLAKE-NEXT:    vpackuswb %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
; SKYLAKE-NEXT:    vpackuswb (%rdi), %ymm0, %ymm0 # sched: [1:1.00]
; SKYLAKE-NEXT:    retq # sched: [2:1.00]
;
; ZNVER1-LABEL: test_packuswb:
; ZNVER1:       # BB#0:
; ZNVER1-NEXT:    vpackuswb %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT:    vpackuswb (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; ZNVER1-NEXT:    retq # sched: [1:0.50]
  %1 = call <32 x i8> @llvm.x86.avx2.packuswb(<16 x i16> %a0, <16 x i16> %a1)
  %2 = bitcast <32 x i8> %1 to <16 x i16>
  %3 = load <16 x i16>, <16 x i16> *%a2, align 32
  %4 = call <32 x i8> @llvm.x86.avx2.packuswb(<16 x i16> %2, <16 x i16> %3)
  ret <32 x i8> %4
}
declare <32 x i8> @llvm.x86.avx2.packuswb(<16 x i16>, <16 x i16>) nounwind readnone
701
; Check per-CPU sched comments for VPADDB (register and memory-folded forms).
define <32 x i8> @test_paddb(<32 x i8> %a0, <32 x i8> %a1, <32 x i8> *%a2) {
; GENERIC-LABEL: test_paddb:
; GENERIC:       # BB#0:
; GENERIC-NEXT:    vpaddb %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT:    vpaddb (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
; GENERIC-NEXT:    retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_paddb:
; HASWELL:       # BB#0:
; HASWELL-NEXT:    vpaddb %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT:    vpaddb (%rdi), %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT:    retq # sched: [2:1.00]
;
; SKYLAKE-LABEL: test_paddb:
; SKYLAKE:       # BB#0:
; SKYLAKE-NEXT:    vpaddb %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; SKYLAKE-NEXT:    vpaddb (%rdi), %ymm0, %ymm0 # sched: [1:0.50]
; SKYLAKE-NEXT:    retq # sched: [2:1.00]
;
; ZNVER1-LABEL: test_paddb:
; ZNVER1:       # BB#0:
; ZNVER1-NEXT:    vpaddb %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT:    vpaddb (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; ZNVER1-NEXT:    retq # sched: [1:0.50]
  %1 = add <32 x i8> %a0, %a1
  %2 = load <32 x i8>, <32 x i8> *%a2, align 32
  %3 = add <32 x i8> %1, %2
  ret <32 x i8> %3
}
731
; Check per-CPU sched comments for VPADDD (register and memory-folded forms).
define <8 x i32> @test_paddd(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> *%a2) {
; GENERIC-LABEL: test_paddd:
; GENERIC:       # BB#0:
; GENERIC-NEXT:    vpaddd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT:    vpaddd (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
; GENERIC-NEXT:    retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_paddd:
; HASWELL:       # BB#0:
; HASWELL-NEXT:    vpaddd %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT:    vpaddd (%rdi), %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT:    retq # sched: [2:1.00]
;
; SKYLAKE-LABEL: test_paddd:
; SKYLAKE:       # BB#0:
; SKYLAKE-NEXT:    vpaddd %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; SKYLAKE-NEXT:    vpaddd (%rdi), %ymm0, %ymm0 # sched: [1:0.50]
; SKYLAKE-NEXT:    retq # sched: [2:1.00]
;
; ZNVER1-LABEL: test_paddd:
; ZNVER1:       # BB#0:
; ZNVER1-NEXT:    vpaddd %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT:    vpaddd (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; ZNVER1-NEXT:    retq # sched: [1:0.50]
  %1 = add <8 x i32> %a0, %a1
  %2 = load <8 x i32>, <8 x i32> *%a2, align 32
  %3 = add <8 x i32> %1, %2
  ret <8 x i32> %3
}
761
; Check per-CPU sched comments for VPADDQ (register and memory-folded forms).
define <4 x i64> @test_paddq(<4 x i64> %a0, <4 x i64> %a1, <4 x i64> *%a2) {
; GENERIC-LABEL: test_paddq:
; GENERIC:       # BB#0:
; GENERIC-NEXT:    vpaddq %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT:    vpaddq (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
; GENERIC-NEXT:    retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_paddq:
; HASWELL:       # BB#0:
; HASWELL-NEXT:    vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT:    vpaddq (%rdi), %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT:    retq # sched: [2:1.00]
;
; SKYLAKE-LABEL: test_paddq:
; SKYLAKE:       # BB#0:
; SKYLAKE-NEXT:    vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; SKYLAKE-NEXT:    vpaddq (%rdi), %ymm0, %ymm0 # sched: [1:0.50]
; SKYLAKE-NEXT:    retq # sched: [2:1.00]
;
; ZNVER1-LABEL: test_paddq:
; ZNVER1:       # BB#0:
; ZNVER1-NEXT:    vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT:    vpaddq (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; ZNVER1-NEXT:    retq # sched: [1:0.50]
  %1 = add <4 x i64> %a0, %a1
  %2 = load <4 x i64>, <4 x i64> *%a2, align 32
  %3 = add <4 x i64> %1, %2
  ret <4 x i64> %3
}
791
; Check per-CPU sched comments for VPADDSB (register and memory-folded forms).
define <32 x i8> @test_paddsb(<32 x i8> %a0, <32 x i8> %a1, <32 x i8> *%a2) {
; GENERIC-LABEL: test_paddsb:
; GENERIC:       # BB#0:
; GENERIC-NEXT:    vpaddsb %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT:    vpaddsb (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
; GENERIC-NEXT:    retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_paddsb:
; HASWELL:       # BB#0:
; HASWELL-NEXT:    vpaddsb %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT:    vpaddsb (%rdi), %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT:    retq # sched: [2:1.00]
;
; SKYLAKE-LABEL: test_paddsb:
; SKYLAKE:       # BB#0:
; SKYLAKE-NEXT:    vpaddsb %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; SKYLAKE-NEXT:    vpaddsb (%rdi), %ymm0, %ymm0 # sched: [1:0.50]
; SKYLAKE-NEXT:    retq # sched: [2:1.00]
;
; ZNVER1-LABEL: test_paddsb:
; ZNVER1:       # BB#0:
; ZNVER1-NEXT:    vpaddsb %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT:    vpaddsb (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; ZNVER1-NEXT:    retq # sched: [1:0.50]
  %1 = call <32 x i8> @llvm.x86.avx2.padds.b(<32 x i8> %a0, <32 x i8> %a1)
  %2 = load <32 x i8>, <32 x i8> *%a2, align 32
  %3 = call <32 x i8> @llvm.x86.avx2.padds.b(<32 x i8> %1, <32 x i8> %2)
  ret <32 x i8> %3
}
declare <32 x i8> @llvm.x86.avx2.padds.b(<32 x i8>, <32 x i8>) nounwind readnone
822
; Check per-CPU sched comments for VPADDSW (register and memory-folded forms).
define <16 x i16> @test_paddsw(<16 x i16> %a0, <16 x i16> %a1, <16 x i16> *%a2) {
; GENERIC-LABEL: test_paddsw:
; GENERIC:       # BB#0:
; GENERIC-NEXT:    vpaddsw %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT:    vpaddsw (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
; GENERIC-NEXT:    retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_paddsw:
; HASWELL:       # BB#0:
; HASWELL-NEXT:    vpaddsw %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT:    vpaddsw (%rdi), %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT:    retq # sched: [2:1.00]
;
; SKYLAKE-LABEL: test_paddsw:
; SKYLAKE:       # BB#0:
; SKYLAKE-NEXT:    vpaddsw %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; SKYLAKE-NEXT:    vpaddsw (%rdi), %ymm0, %ymm0 # sched: [1:0.50]
; SKYLAKE-NEXT:    retq # sched: [2:1.00]
;
; ZNVER1-LABEL: test_paddsw:
; ZNVER1:       # BB#0:
; ZNVER1-NEXT:    vpaddsw %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT:    vpaddsw (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; ZNVER1-NEXT:    retq # sched: [1:0.50]
  %1 = call <16 x i16> @llvm.x86.avx2.padds.w(<16 x i16> %a0, <16 x i16> %a1)
  %2 = load <16 x i16>, <16 x i16> *%a2, align 32
  %3 = call <16 x i16> @llvm.x86.avx2.padds.w(<16 x i16> %1, <16 x i16> %2)
  ret <16 x i16> %3
}
declare <16 x i16> @llvm.x86.avx2.padds.w(<16 x i16>, <16 x i16>) nounwind readnone
853
; Check per-CPU sched comments for VPADDUSB (register and memory-folded forms).
define <32 x i8> @test_paddusb(<32 x i8> %a0, <32 x i8> %a1, <32 x i8> *%a2) {
; GENERIC-LABEL: test_paddusb:
; GENERIC:       # BB#0:
; GENERIC-NEXT:    vpaddusb %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT:    vpaddusb (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
; GENERIC-NEXT:    retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_paddusb:
; HASWELL:       # BB#0:
; HASWELL-NEXT:    vpaddusb %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT:    vpaddusb (%rdi), %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT:    retq # sched: [2:1.00]
;
; SKYLAKE-LABEL: test_paddusb:
; SKYLAKE:       # BB#0:
; SKYLAKE-NEXT:    vpaddusb %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; SKYLAKE-NEXT:    vpaddusb (%rdi), %ymm0, %ymm0 # sched: [1:0.50]
; SKYLAKE-NEXT:    retq # sched: [2:1.00]
;
; ZNVER1-LABEL: test_paddusb:
; ZNVER1:       # BB#0:
; ZNVER1-NEXT:    vpaddusb %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT:    vpaddusb (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; ZNVER1-NEXT:    retq # sched: [1:0.50]
  %1 = call <32 x i8> @llvm.x86.avx2.paddus.b(<32 x i8> %a0, <32 x i8> %a1)
  %2 = load <32 x i8>, <32 x i8> *%a2, align 32
  %3 = call <32 x i8> @llvm.x86.avx2.paddus.b(<32 x i8> %1, <32 x i8> %2)
  ret <32 x i8> %3
}
declare <32 x i8> @llvm.x86.avx2.paddus.b(<32 x i8>, <32 x i8>) nounwind readnone
884
; Check per-CPU sched comments for VPADDUSW (register and memory-folded forms).
define <16 x i16> @test_paddusw(<16 x i16> %a0, <16 x i16> %a1, <16 x i16> *%a2) {
; GENERIC-LABEL: test_paddusw:
; GENERIC:       # BB#0:
; GENERIC-NEXT:    vpaddusw %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT:    vpaddusw (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
; GENERIC-NEXT:    retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_paddusw:
; HASWELL:       # BB#0:
; HASWELL-NEXT:    vpaddusw %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT:    vpaddusw (%rdi), %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT:    retq # sched: [2:1.00]
;
; SKYLAKE-LABEL: test_paddusw:
; SKYLAKE:       # BB#0:
; SKYLAKE-NEXT:    vpaddusw %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; SKYLAKE-NEXT:    vpaddusw (%rdi), %ymm0, %ymm0 # sched: [1:0.50]
; SKYLAKE-NEXT:    retq # sched: [2:1.00]
;
; ZNVER1-LABEL: test_paddusw:
; ZNVER1:       # BB#0:
; ZNVER1-NEXT:    vpaddusw %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT:    vpaddusw (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; ZNVER1-NEXT:    retq # sched: [1:0.50]
  %1 = call <16 x i16> @llvm.x86.avx2.paddus.w(<16 x i16> %a0, <16 x i16> %a1)
  %2 = load <16 x i16>, <16 x i16> *%a2, align 32
  %3 = call <16 x i16> @llvm.x86.avx2.paddus.w(<16 x i16> %1, <16 x i16> %2)
  ret <16 x i16> %3
}
declare <16 x i16> @llvm.x86.avx2.paddus.w(<16 x i16>, <16 x i16>) nounwind readnone
915
; Check per-CPU sched comments for VPADDW (register and memory-folded forms).
define <16 x i16> @test_paddw(<16 x i16> %a0, <16 x i16> %a1, <16 x i16> *%a2) {
; GENERIC-LABEL: test_paddw:
; GENERIC:       # BB#0:
; GENERIC-NEXT:    vpaddw %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT:    vpaddw (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
; GENERIC-NEXT:    retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_paddw:
; HASWELL:       # BB#0:
; HASWELL-NEXT:    vpaddw %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT:    vpaddw (%rdi), %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT:    retq # sched: [2:1.00]
;
; SKYLAKE-LABEL: test_paddw:
; SKYLAKE:       # BB#0:
; SKYLAKE-NEXT:    vpaddw %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; SKYLAKE-NEXT:    vpaddw (%rdi), %ymm0, %ymm0 # sched: [1:0.50]
; SKYLAKE-NEXT:    retq # sched: [2:1.00]
;
; ZNVER1-LABEL: test_paddw:
; ZNVER1:       # BB#0:
; ZNVER1-NEXT:    vpaddw %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT:    vpaddw (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; ZNVER1-NEXT:    retq # sched: [1:0.50]
  %1 = add <16 x i16> %a0, %a1
  %2 = load <16 x i16>, <16 x i16> *%a2, align 32
  %3 = add <16 x i16> %1, %2
  ret <16 x i16> %3
}
945
; Check per-CPU sched comments for VPALIGNR (register and memory-folded forms),
; lowered from byte-rotate shufflevector patterns.
define <32 x i8> @test_palignr(<32 x i8> %a0, <32 x i8> %a1, <32 x i8> *%a2) {
; GENERIC-LABEL: test_palignr:
; GENERIC:       # BB#0:
; GENERIC-NEXT:    vpalignr {{.*#+}} ymm0 = ymm1[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15],ymm0[0],ymm1[17,18,19,20,21,22,23,24,25,26,27,28,29,30,31],ymm0[16] sched: [1:1.00]
; GENERIC-NEXT:    vpalignr {{.*#+}} ymm0 = mem[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15],ymm0[0],mem[17,18,19,20,21,22,23,24,25,26,27,28,29,30,31],ymm0[16] sched: [5:1.00]
; GENERIC-NEXT:    retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_palignr:
; HASWELL:       # BB#0:
; HASWELL-NEXT:    vpalignr {{.*#+}} ymm0 = ymm1[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15],ymm0[0],ymm1[17,18,19,20,21,22,23,24,25,26,27,28,29,30,31],ymm0[16] sched: [1:1.00]
; HASWELL-NEXT:    vpalignr {{.*#+}} ymm0 = mem[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15],ymm0[0],mem[17,18,19,20,21,22,23,24,25,26,27,28,29,30,31],ymm0[16] sched: [1:1.00]
; HASWELL-NEXT:    retq # sched: [2:1.00]
;
; SKYLAKE-LABEL: test_palignr:
; SKYLAKE:       # BB#0:
; SKYLAKE-NEXT:    vpalignr {{.*#+}} ymm0 = ymm1[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15],ymm0[0],ymm1[17,18,19,20,21,22,23,24,25,26,27,28,29,30,31],ymm0[16] sched: [1:1.00]
; SKYLAKE-NEXT:    vpalignr {{.*#+}} ymm0 = mem[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15],ymm0[0],mem[17,18,19,20,21,22,23,24,25,26,27,28,29,30,31],ymm0[16] sched: [1:1.00]
; SKYLAKE-NEXT:    retq # sched: [2:1.00]
;
; ZNVER1-LABEL: test_palignr:
; ZNVER1:       # BB#0:
; ZNVER1-NEXT:    vpalignr {{.*#+}} ymm0 = ymm1[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15],ymm0[0],ymm1[17,18,19,20,21,22,23,24,25,26,27,28,29,30,31],ymm0[16] sched: [1:0.25]
; ZNVER1-NEXT:    vpalignr {{.*#+}} ymm0 = mem[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15],ymm0[0],mem[17,18,19,20,21,22,23,24,25,26,27,28,29,30,31],ymm0[16] sched: [8:0.50]
; ZNVER1-NEXT:    retq # sched: [1:0.50]
  %1 = shufflevector <32 x i8> %a1, <32 x i8> %a0, <32 x i32> <i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 32, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 48>
  %2 = load <32 x i8>, <32 x i8> *%a2, align 32
  %3 = shufflevector <32 x i8> %2, <32 x i8> %1, <32 x i32> <i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 32, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 48>
  ret <32 x i8> %3
}
975
; Check per-CPU sched comments for VPAND (register and memory-folded forms);
; the trailing vpaddq keeps the logic op from being dead-code eliminated.
define <4 x i64> @test_pand(<4 x i64> %a0, <4 x i64> %a1, <4 x i64> *%a2) {
; GENERIC-LABEL: test_pand:
; GENERIC:       # BB#0:
; GENERIC-NEXT:    vpand %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
; GENERIC-NEXT:    vpand (%rdi), %ymm0, %ymm0 # sched: [5:1.00]
; GENERIC-NEXT:    vpaddq %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT:    retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pand:
; HASWELL:       # BB#0:
; HASWELL-NEXT:    vpand %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; HASWELL-NEXT:    vpand (%rdi), %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT:    vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT:    retq # sched: [2:1.00]
;
; SKYLAKE-LABEL: test_pand:
; SKYLAKE:       # BB#0:
; SKYLAKE-NEXT:    vpand %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; SKYLAKE-NEXT:    vpand (%rdi), %ymm0, %ymm0 # sched: [1:0.50]
; SKYLAKE-NEXT:    vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; SKYLAKE-NEXT:    retq # sched: [2:1.00]
;
; ZNVER1-LABEL: test_pand:
; ZNVER1:       # BB#0:
; ZNVER1-NEXT:    vpand %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT:    vpand (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; ZNVER1-NEXT:    vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT:    retq # sched: [1:0.50]
  %1 = and <4 x i64> %a0, %a1
  %2 = load <4 x i64>, <4 x i64> *%a2, align 32
  %3 = and <4 x i64> %1, %2
  %4 = add <4 x i64> %3, %a1
  ret <4 x i64> %4
}
1010
; Check per-CPU sched comments for VPANDN (register and memory-folded forms),
; lowered from xor-with-all-ones + and patterns.
define <4 x i64> @test_pandn(<4 x i64> %a0, <4 x i64> %a1, <4 x i64> *%a2) {
; GENERIC-LABEL: test_pandn:
; GENERIC:       # BB#0:
; GENERIC-NEXT:    vpandn %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
; GENERIC-NEXT:    vpandn (%rdi), %ymm0, %ymm1 # sched: [5:1.00]
; GENERIC-NEXT:    vpaddq %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT:    retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pandn:
; HASWELL:       # BB#0:
; HASWELL-NEXT:    vpandn %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; HASWELL-NEXT:    vpandn (%rdi), %ymm0, %ymm1 # sched: [1:0.50]
; HASWELL-NEXT:    vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT:    retq # sched: [2:1.00]
;
; SKYLAKE-LABEL: test_pandn:
; SKYLAKE:       # BB#0:
; SKYLAKE-NEXT:    vpandn %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
; SKYLAKE-NEXT:    vpandn (%rdi), %ymm0, %ymm1 # sched: [1:0.50]
; SKYLAKE-NEXT:    vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; SKYLAKE-NEXT:    retq # sched: [2:1.00]
;
; ZNVER1-LABEL: test_pandn:
; ZNVER1:       # BB#0:
; ZNVER1-NEXT:    vpandn %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT:    vpandn (%rdi), %ymm0, %ymm1 # sched: [8:0.50]
; ZNVER1-NEXT:    vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT:    retq # sched: [1:0.50]
  %1 = xor <4 x i64> %a0, <i64 -1, i64 -1, i64 -1, i64 -1>
  %2 = and <4 x i64> %a1, %1
  %3 = load <4 x i64>, <4 x i64> *%a2, align 32
  %4 = xor <4 x i64> %2, <i64 -1, i64 -1, i64 -1, i64 -1>
  %5 = and <4 x i64> %3, %4
  %6 = add <4 x i64> %2, %5
  ret <4 x i64> %6
}
1047
; Check per-CPU sched comments for VPAVGB (register and memory-folded forms),
; lowered from the zext/add/add-1/lshr-1/trunc averaging pattern.
define <32 x i8> @test_pavgb(<32 x i8> %a0, <32 x i8> %a1, <32 x i8> *%a2) {
; GENERIC-LABEL: test_pavgb:
; GENERIC:       # BB#0:
; GENERIC-NEXT:    vpavgb %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT:    vpavgb (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
; GENERIC-NEXT:    retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pavgb:
; HASWELL:       # BB#0:
; HASWELL-NEXT:    vpavgb %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT:    vpavgb (%rdi), %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT:    retq # sched: [2:1.00]
;
; SKYLAKE-LABEL: test_pavgb:
; SKYLAKE:       # BB#0:
; SKYLAKE-NEXT:    vpavgb %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; SKYLAKE-NEXT:    vpavgb (%rdi), %ymm0, %ymm0 # sched: [1:0.50]
; SKYLAKE-NEXT:    retq # sched: [2:1.00]
;
; ZNVER1-LABEL: test_pavgb:
; ZNVER1:       # BB#0:
; ZNVER1-NEXT:    vpavgb %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT:    vpavgb (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; ZNVER1-NEXT:    retq # sched: [1:0.50]
  %1 = zext <32 x i8> %a0 to <32 x i16>
  %2 = zext <32 x i8> %a1 to <32 x i16>
  %3 = add <32 x i16> %1, %2
  %4 = add <32 x i16> %3, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
  %5 = lshr <32 x i16> %4, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
  %6 = trunc <32 x i16> %5 to <32 x i8>
  %7 = load <32 x i8>, <32 x i8> *%a2, align 32
  %8 = zext <32 x i8> %6 to <32 x i16>
  %9 = zext <32 x i8> %7 to <32 x i16>
  %10 = add <32 x i16> %8, %9
  %11 = add <32 x i16> %10, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
  %12 = lshr <32 x i16> %11, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
  %13 = trunc <32 x i16> %12 to <32 x i8>
  ret <32 x i8> %13
}
1087
; Check per-CPU sched comments for VPAVGW (register and memory-folded forms),
; lowered from the zext/add/add-1/lshr-1/trunc averaging pattern.
define <16 x i16> @test_pavgw(<16 x i16> %a0, <16 x i16> %a1, <16 x i16> *%a2) {
; GENERIC-LABEL: test_pavgw:
; GENERIC:       # BB#0:
; GENERIC-NEXT:    vpavgw %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT:    vpavgw (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
; GENERIC-NEXT:    retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pavgw:
; HASWELL:       # BB#0:
; HASWELL-NEXT:    vpavgw %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT:    vpavgw (%rdi), %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT:    retq # sched: [2:1.00]
;
; SKYLAKE-LABEL: test_pavgw:
; SKYLAKE:       # BB#0:
; SKYLAKE-NEXT:    vpavgw %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; SKYLAKE-NEXT:    vpavgw (%rdi), %ymm0, %ymm0 # sched: [1:0.50]
; SKYLAKE-NEXT:    retq # sched: [2:1.00]
;
; ZNVER1-LABEL: test_pavgw:
; ZNVER1:       # BB#0:
; ZNVER1-NEXT:    vpavgw %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT:    vpavgw (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; ZNVER1-NEXT:    retq # sched: [1:0.50]
  %1 = zext <16 x i16> %a0 to <16 x i32>
  %2 = zext <16 x i16> %a1 to <16 x i32>
  %3 = add <16 x i32> %1, %2
  %4 = add <16 x i32> %3, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
  %5 = lshr <16 x i32> %4, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
  %6 = trunc <16 x i32> %5 to <16 x i16>
  %7 = load <16 x i16>, <16 x i16> *%a2, align 32
  %8 = zext <16 x i16> %6 to <16 x i32>
  %9 = zext <16 x i16> %7 to <16 x i32>
  %10 = add <16 x i32> %8, %9
  %11 = add <16 x i32> %10, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
  %12 = lshr <16 x i32> %11, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
  %13 = trunc <16 x i32> %12 to <16 x i16>
  ret <16 x i16> %13
}
1127
; Check per-CPU sched comments for 128-bit VPBLENDD (register and
; memory-folded forms), lowered from shufflevector blends.
define <4 x i32> @test_pblendd(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> *%a2) {
; GENERIC-LABEL: test_pblendd:
; GENERIC:       # BB#0:
; GENERIC-NEXT:    vpblendd {{.*#+}} xmm1 = xmm1[0,1,2],xmm0[3] sched: [1:0.50]
; GENERIC-NEXT:    vpblendd {{.*#+}} xmm1 = mem[0],xmm1[1],mem[2],xmm1[3] sched: [5:0.50]
; GENERIC-NEXT:    vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; GENERIC-NEXT:    retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pblendd:
; HASWELL:       # BB#0:
; HASWELL-NEXT:    vpblendd {{.*#+}} xmm1 = xmm1[0,1,2],xmm0[3] sched: [1:0.33]
; HASWELL-NEXT:    vpblendd {{.*#+}} xmm1 = mem[0],xmm1[1],mem[2],xmm1[3] sched: [1:0.50]
; HASWELL-NEXT:    vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; HASWELL-NEXT:    retq # sched: [2:1.00]
;
; SKYLAKE-LABEL: test_pblendd:
; SKYLAKE:       # BB#0:
; SKYLAKE-NEXT:    vpblendd {{.*#+}} xmm1 = xmm1[0,1,2],xmm0[3] sched: [1:0.33]
; SKYLAKE-NEXT:    vpblendd {{.*#+}} xmm1 = mem[0],xmm1[1],mem[2],xmm1[3] sched: [1:0.50]
; SKYLAKE-NEXT:    vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; SKYLAKE-NEXT:    retq # sched: [2:1.00]
;
; ZNVER1-LABEL: test_pblendd:
; ZNVER1:       # BB#0:
; ZNVER1-NEXT:    vpblendd {{.*#+}} xmm1 = xmm1[0,1,2],xmm0[3] sched: [1:0.50]
; ZNVER1-NEXT:    vpblendd {{.*#+}} xmm1 = mem[0],xmm1[1],mem[2],xmm1[3] sched: [8:1.00]
; ZNVER1-NEXT:    vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT:    retq # sched: [1:0.50]
  %1 = shufflevector <4 x i32> %a0, <4 x i32> %a1, <4 x i32> <i32 4, i32 5, i32 6, i32 3>
  %2 = load <4 x i32>, <4 x i32> *%a2, align 16
  %3 = shufflevector <4 x i32> %1, <4 x i32> %2, <4 x i32> <i32 4, i32 1, i32 6, i32 3>
  %4 = add <4 x i32> %a0, %3
  ret <4 x i32> %4
}
1162
; Check per-CPU sched comments for 256-bit VPBLENDD (register and
; memory-folded forms), lowered from shufflevector blends.
define <8 x i32> @test_pblendd_ymm(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> *%a2) {
; GENERIC-LABEL: test_pblendd_ymm:
; GENERIC:       # BB#0:
; GENERIC-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2],ymm0[3,4,5,6],ymm1[7] sched: [1:0.50]
; GENERIC-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0],mem[1,2],ymm1[3,4,5,6,7] sched: [5:0.50]
; GENERIC-NEXT:    vpaddd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT:    retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pblendd_ymm:
; HASWELL:       # BB#0:
; HASWELL-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2],ymm0[3,4,5,6],ymm1[7] sched: [1:0.33]
; HASWELL-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0],mem[1,2],ymm1[3,4,5,6,7] sched: [1:0.50]
; HASWELL-NEXT:    vpaddd %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT:    retq # sched: [2:1.00]
;
; SKYLAKE-LABEL: test_pblendd_ymm:
; SKYLAKE:       # BB#0:
; SKYLAKE-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2],ymm0[3,4,5,6],ymm1[7] sched: [1:0.33]
; SKYLAKE-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0],mem[1,2],ymm1[3,4,5,6,7] sched: [1:0.50]
; SKYLAKE-NEXT:    vpaddd %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; SKYLAKE-NEXT:    retq # sched: [2:1.00]
;
; ZNVER1-LABEL: test_pblendd_ymm:
; ZNVER1:       # BB#0:
; ZNVER1-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2],ymm0[3,4,5,6],ymm1[7] sched: [1:0.50]
; ZNVER1-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0],mem[1,2],ymm1[3,4,5,6,7] sched: [9:1.50]
; ZNVER1-NEXT:    vpaddd %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT:    retq # sched: [1:0.50]
  %1 = shufflevector <8 x i32> %a0, <8 x i32> %a1, <8 x i32> <i32 8, i32 9, i32 10, i32 3, i32 4, i32 5, i32 6, i32 15>
  %2 = load <8 x i32>, <8 x i32> *%a2, align 32
  %3 = shufflevector <8 x i32> %1, <8 x i32> %2, <8 x i32> <i32 0, i32 9, i32 10, i32 3, i32 4, i32 5, i32 6, i32 7>
  %4 = add <8 x i32> %a0, %3
  ret <8 x i32> %4
}
1197
; Check per-CPU sched comments for VPBLENDVB (register and memory-folded forms).
define <32 x i8> @test_pblendvb(<32 x i8> %a0, <32 x i8> %a1, <32 x i8> %a2, <32 x i8> *%a3, <32 x i8> %a4) {
; GENERIC-LABEL: test_pblendvb:
; GENERIC:       # BB#0:
; GENERIC-NEXT:    vpblendvb %ymm2, %ymm1, %ymm0, %ymm0 # sched: [2:1.00]
; GENERIC-NEXT:    vpblendvb %ymm3, (%rdi), %ymm0, %ymm0 # sched: [6:1.00]
; GENERIC-NEXT:    retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pblendvb:
; HASWELL:       # BB#0:
; HASWELL-NEXT:    vpblendvb %ymm2, %ymm1, %ymm0, %ymm0 # sched: [2:2.00]
; HASWELL-NEXT:    vpblendvb %ymm3, (%rdi), %ymm0, %ymm0 # sched: [2:2.00]
; HASWELL-NEXT:    retq # sched: [2:1.00]
;
; SKYLAKE-LABEL: test_pblendvb:
; SKYLAKE:       # BB#0:
; SKYLAKE-NEXT:    vpblendvb %ymm2, %ymm1, %ymm0, %ymm0 # sched: [2:2.00]
; SKYLAKE-NEXT:    vpblendvb %ymm3, (%rdi), %ymm0, %ymm0 # sched: [2:2.00]
; SKYLAKE-NEXT:    retq # sched: [2:1.00]
;
; ZNVER1-LABEL: test_pblendvb:
; ZNVER1:       # BB#0:
; ZNVER1-NEXT:    vpblendvb %ymm2, %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
; ZNVER1-NEXT:    vpblendvb %ymm3, (%rdi), %ymm0, %ymm0 # sched: [8:1.00]
; ZNVER1-NEXT:    retq # sched: [1:0.50]
  %1 = call <32 x i8> @llvm.x86.avx2.pblendvb(<32 x i8> %a0, <32 x i8> %a1, <32 x i8> %a2)
  %2 = load <32 x i8>, <32 x i8> *%a3, align 32
  %3 = call <32 x i8> @llvm.x86.avx2.pblendvb(<32 x i8> %1, <32 x i8> %2, <32 x i8> %a4)
  ret <32 x i8> %3
}
declare <32 x i8> @llvm.x86.avx2.pblendvb(<32 x i8>, <32 x i8>, <32 x i8>) nounwind readnone
1228
; Check per-CPU sched comments for VPBLENDW (register and memory-folded forms),
; lowered from shufflevector blends.
define <16 x i16> @test_pblendw(<16 x i16> %a0, <16 x i16> %a1, <16 x i16> *%a2) {
; GENERIC-LABEL: test_pblendw:
; GENERIC:       # BB#0:
; GENERIC-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3,4],ymm0[5,6,7,8,9],ymm1[10,11,12],ymm0[13,14,15] sched: [1:0.50]
; GENERIC-NEXT:    vpblendw {{.*#+}} ymm0 = mem[0],ymm0[1],mem[2],ymm0[3],mem[4],ymm0[5],mem[6],ymm0[7],mem[8],ymm0[9],mem[10],ymm0[11],mem[12],ymm0[13],mem[14],ymm0[15] sched: [5:0.50]
; GENERIC-NEXT:    retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pblendw:
; HASWELL:       # BB#0:
; HASWELL-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3,4],ymm0[5,6,7,8,9],ymm1[10,11,12],ymm0[13,14,15] sched: [1:1.00]
; HASWELL-NEXT:    vpblendw {{.*#+}} ymm0 = mem[0],ymm0[1],mem[2],ymm0[3],mem[4],ymm0[5],mem[6],ymm0[7],mem[8],ymm0[9],mem[10],ymm0[11],mem[12],ymm0[13],mem[14],ymm0[15] sched: [4:1.00]
; HASWELL-NEXT:    retq # sched: [2:1.00]
;
; SKYLAKE-LABEL: test_pblendw:
; SKYLAKE:       # BB#0:
; SKYLAKE-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3,4],ymm0[5,6,7,8,9],ymm1[10,11,12],ymm0[13,14,15] sched: [1:1.00]
; SKYLAKE-NEXT:    vpblendw {{.*#+}} ymm0 = mem[0],ymm0[1],mem[2],ymm0[3],mem[4],ymm0[5],mem[6],ymm0[7],mem[8],ymm0[9],mem[10],ymm0[11],mem[12],ymm0[13],mem[14],ymm0[15] sched: [4:1.00]
; SKYLAKE-NEXT:    retq # sched: [2:1.00]
;
; ZNVER1-LABEL: test_pblendw:
; ZNVER1:       # BB#0:
; ZNVER1-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3,4],ymm0[5,6,7,8,9],ymm1[10,11,12],ymm0[13,14,15] sched: [2:0.33]
; ZNVER1-NEXT:    vpblendw {{.*#+}} ymm0 = mem[0],ymm0[1],mem[2],ymm0[3],mem[4],ymm0[5],mem[6],ymm0[7],mem[8],ymm0[9],mem[10],ymm0[11],mem[12],ymm0[13],mem[14],ymm0[15] sched: [9:0.50]
; ZNVER1-NEXT:    retq # sched: [1:0.50]
  %1 = shufflevector <16 x i16> %a0, <16 x i16> %a1, <16 x i32> <i32 0, i32 1, i32 18, i32 19, i32 20, i32 5, i32 6, i32 7, i32 8, i32 9, i32 26, i32 27, i32 28, i32 13, i32 14, i32 15>
  %2 = load <16 x i16>, <16 x i16> *%a2, align 32
  %3 = shufflevector <16 x i16> %1, <16 x i16> %2, <16 x i32> <i32 16, i32 1, i32 18, i32 3, i32 20, i32 5, i32 22, i32 7, i32 24, i32 9, i32 26, i32 11, i32 28, i32 13, i32 30, i32 15>
  ret <16 x i16> %3
}
1258
; Check per-CPU sched comments for 128-bit VPBROADCASTB (register and load
; forms); note ZNVER1 schedules the load-form broadcast first.
define <16 x i8> @test_pbroadcastb(<16 x i8> %a0, <16 x i8> *%a1) {
; GENERIC-LABEL: test_pbroadcastb:
; GENERIC:       # BB#0:
; GENERIC-NEXT:    vpbroadcastb %xmm0, %xmm0 # sched: [1:1.00]
; GENERIC-NEXT:    vpbroadcastb (%rdi), %xmm1 # sched: [4:0.50]
; GENERIC-NEXT:    vpaddb %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; GENERIC-NEXT:    retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pbroadcastb:
; HASWELL:       # BB#0:
; HASWELL-NEXT:    vpbroadcastb %xmm0, %xmm0 # sched: [3:1.00]
; HASWELL-NEXT:    vpbroadcastb (%rdi), %xmm1 # sched: [4:1.00]
; HASWELL-NEXT:    vpaddb %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; HASWELL-NEXT:    retq # sched: [2:1.00]
;
; SKYLAKE-LABEL: test_pbroadcastb:
; SKYLAKE:       # BB#0:
; SKYLAKE-NEXT:    vpbroadcastb %xmm0, %xmm0 # sched: [3:1.00]
; SKYLAKE-NEXT:    vpbroadcastb (%rdi), %xmm1 # sched: [4:1.00]
; SKYLAKE-NEXT:    vpaddb %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; SKYLAKE-NEXT:    retq # sched: [2:1.00]
;
; ZNVER1-LABEL: test_pbroadcastb:
; ZNVER1:       # BB#0:
; ZNVER1-NEXT:    vpbroadcastb (%rdi), %xmm1 # sched: [8:1.00]
; ZNVER1-NEXT:    vpbroadcastb %xmm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT:    vpaddb %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT:    retq # sched: [1:0.50]
  %1 = shufflevector <16 x i8> %a0, <16 x i8> undef, <16 x i32> zeroinitializer
  %2 = load <16 x i8>, <16 x i8> *%a1, align 16
  %3 = shufflevector <16 x i8> %2, <16 x i8> undef, <16 x i32> zeroinitializer
  %4 = add <16 x i8> %1, %3
  ret <16 x i8> %4
}
1293
; Checks printed scheduling info for VPBROADCASTB ymm (register and load forms)
; plus VPADDB ymm.
define <32 x i8> @test_pbroadcastb_ymm(<32 x i8> %a0, <32 x i8> *%a1) {
; GENERIC-LABEL: test_pbroadcastb_ymm:
; GENERIC:       # BB#0:
; GENERIC-NEXT:    vpbroadcastb %xmm0, %ymm0 # sched: [1:1.00]
; GENERIC-NEXT:    vpbroadcastb (%rdi), %ymm1 # sched: [4:0.50]
; GENERIC-NEXT:    vpaddb %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT:    retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pbroadcastb_ymm:
; HASWELL:       # BB#0:
; HASWELL-NEXT:    vpbroadcastb %xmm0, %ymm0 # sched: [3:1.00]
; HASWELL-NEXT:    vpbroadcastb (%rdi), %ymm1 # sched: [4:1.00]
; HASWELL-NEXT:    vpaddb %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT:    retq # sched: [2:1.00]
;
; SKYLAKE-LABEL: test_pbroadcastb_ymm:
; SKYLAKE:       # BB#0:
; SKYLAKE-NEXT:    vpbroadcastb %xmm0, %ymm0 # sched: [3:1.00]
; SKYLAKE-NEXT:    vpbroadcastb (%rdi), %ymm1 # sched: [4:1.00]
; SKYLAKE-NEXT:    vpaddb %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; SKYLAKE-NEXT:    retq # sched: [2:1.00]
;
; ZNVER1-LABEL: test_pbroadcastb_ymm:
; ZNVER1:       # BB#0:
; ZNVER1-NEXT:    vpbroadcastb (%rdi), %ymm1 # sched: [8:2.00]
; ZNVER1-NEXT:    vpbroadcastb %xmm0, %ymm0 # sched: [2:0.25]
; ZNVER1-NEXT:    vpaddb %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT:    retq # sched: [1:0.50]
  %1 = shufflevector <32 x i8> %a0, <32 x i8> undef, <32 x i32> zeroinitializer
  %2 = load <32 x i8>, <32 x i8> *%a1, align 32
  %3 = shufflevector <32 x i8> %2, <32 x i8> undef, <32 x i32> zeroinitializer
  %4 = add <32 x i8> %1, %3
  ret <32 x i8> %4
}

; Checks printed scheduling info for VPBROADCASTD xmm (register and load forms)
; plus VPADDD.
define <4 x i32> @test_pbroadcastd(<4 x i32> %a0, <4 x i32> *%a1) {
; GENERIC-LABEL: test_pbroadcastd:
; GENERIC:       # BB#0:
; GENERIC-NEXT:    vpbroadcastd %xmm0, %xmm0 # sched: [1:1.00]
; GENERIC-NEXT:    vpbroadcastd (%rdi), %xmm1 # sched: [4:0.50]
; GENERIC-NEXT:    vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; GENERIC-NEXT:    retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pbroadcastd:
; HASWELL:       # BB#0:
; HASWELL-NEXT:    vpbroadcastd %xmm0, %xmm0 # sched: [1:1.00]
; HASWELL-NEXT:    vpbroadcastd (%rdi), %xmm1 # sched: [1:0.50]
; HASWELL-NEXT:    vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; HASWELL-NEXT:    retq # sched: [2:1.00]
;
; SKYLAKE-LABEL: test_pbroadcastd:
; SKYLAKE:       # BB#0:
; SKYLAKE-NEXT:    vpbroadcastd %xmm0, %xmm0 # sched: [1:1.00]
; SKYLAKE-NEXT:    vpbroadcastd (%rdi), %xmm1 # sched: [1:0.50]
; SKYLAKE-NEXT:    vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; SKYLAKE-NEXT:    retq # sched: [2:1.00]
;
; ZNVER1-LABEL: test_pbroadcastd:
; ZNVER1:       # BB#0:
; ZNVER1-NEXT:    vpbroadcastd (%rdi), %xmm1 # sched: [8:0.50]
; ZNVER1-NEXT:    vpbroadcastd %xmm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT:    vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT:    retq # sched: [1:0.50]
  %1 = shufflevector <4 x i32> %a0, <4 x i32> undef, <4 x i32> zeroinitializer
  %2 = load <4 x i32>, <4 x i32> *%a1, align 16
  %3 = shufflevector <4 x i32> %2, <4 x i32> undef, <4 x i32> zeroinitializer
  %4 = add <4 x i32> %1, %3
  ret <4 x i32> %4
}

; Checks printed scheduling info for VPBROADCASTD ymm (register and load forms)
; plus VPADDD ymm.
define <8 x i32> @test_pbroadcastd_ymm(<8 x i32> %a0, <8 x i32> *%a1) {
; GENERIC-LABEL: test_pbroadcastd_ymm:
; GENERIC:       # BB#0:
; GENERIC-NEXT:    vpbroadcastd %xmm0, %ymm0 # sched: [1:1.00]
; GENERIC-NEXT:    vpbroadcastd (%rdi), %ymm1 # sched: [4:0.50]
; GENERIC-NEXT:    vpaddd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT:    retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pbroadcastd_ymm:
; HASWELL:       # BB#0:
; HASWELL-NEXT:    vpbroadcastd %xmm0, %ymm0 # sched: [3:1.00]
; HASWELL-NEXT:    vpbroadcastd (%rdi), %ymm1 # sched: [1:0.50]
; HASWELL-NEXT:    vpaddd %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT:    retq # sched: [2:1.00]
;
; SKYLAKE-LABEL: test_pbroadcastd_ymm:
; SKYLAKE:       # BB#0:
; SKYLAKE-NEXT:    vpbroadcastd %xmm0, %ymm0 # sched: [3:1.00]
; SKYLAKE-NEXT:    vpbroadcastd (%rdi), %ymm1 # sched: [1:0.50]
; SKYLAKE-NEXT:    vpaddd %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; SKYLAKE-NEXT:    retq # sched: [2:1.00]
;
; ZNVER1-LABEL: test_pbroadcastd_ymm:
; ZNVER1:       # BB#0:
; ZNVER1-NEXT:    vpbroadcastd (%rdi), %ymm1 # sched: [8:0.50]
; ZNVER1-NEXT:    vpbroadcastd %xmm0, %ymm0 # sched: [2:0.25]
; ZNVER1-NEXT:    vpaddd %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT:    retq # sched: [1:0.50]
  %1 = shufflevector <8 x i32> %a0, <8 x i32> undef, <8 x i32> zeroinitializer
  %2 = load <8 x i32>, <8 x i32> *%a1, align 32
  %3 = shufflevector <8 x i32> %2, <8 x i32> undef, <8 x i32> zeroinitializer
  %4 = add <8 x i32> %1, %3
  ret <8 x i32> %4
}

; Checks printed scheduling info for VPBROADCASTQ xmm (register and load forms)
; plus VPADDQ.
define <2 x i64> @test_pbroadcastq(<2 x i64> %a0, <2 x i64> *%a1) {
; GENERIC-LABEL: test_pbroadcastq:
; GENERIC:       # BB#0:
; GENERIC-NEXT:    vpbroadcastq %xmm0, %xmm0 # sched: [1:1.00]
; GENERIC-NEXT:    vpbroadcastq (%rdi), %xmm1 # sched: [4:0.50]
; GENERIC-NEXT:    vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; GENERIC-NEXT:    retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pbroadcastq:
; HASWELL:       # BB#0:
; HASWELL-NEXT:    vpbroadcastq %xmm0, %xmm0 # sched: [1:1.00]
; HASWELL-NEXT:    vpbroadcastq (%rdi), %xmm1 # sched: [1:0.50]
; HASWELL-NEXT:    vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; HASWELL-NEXT:    retq # sched: [2:1.00]
;
; SKYLAKE-LABEL: test_pbroadcastq:
; SKYLAKE:       # BB#0:
; SKYLAKE-NEXT:    vpbroadcastq %xmm0, %xmm0 # sched: [1:1.00]
; SKYLAKE-NEXT:    vpbroadcastq (%rdi), %xmm1 # sched: [1:0.50]
; SKYLAKE-NEXT:    vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; SKYLAKE-NEXT:    retq # sched: [2:1.00]
;
; ZNVER1-LABEL: test_pbroadcastq:
; ZNVER1:       # BB#0:
; ZNVER1-NEXT:    vpbroadcastq (%rdi), %xmm1 # sched: [8:0.50]
; ZNVER1-NEXT:    vpbroadcastq %xmm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT:    vpaddq %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT:    retq # sched: [1:0.50]
  %1 = shufflevector <2 x i64> %a0, <2 x i64> undef, <2 x i32> zeroinitializer
  %2 = load <2 x i64>, <2 x i64> *%a1, align 16
  %3 = shufflevector <2 x i64> %2, <2 x i64> undef, <2 x i32> zeroinitializer
  %4 = add <2 x i64> %1, %3
  ret <2 x i64> %4
}

; Checks printed scheduling info for VPBROADCASTQ ymm (register and load forms)
; plus VPADDQ ymm.
define <4 x i64> @test_pbroadcastq_ymm(<4 x i64> %a0, <4 x i64> *%a1) {
; GENERIC-LABEL: test_pbroadcastq_ymm:
; GENERIC:       # BB#0:
; GENERIC-NEXT:    vpbroadcastq %xmm0, %ymm0 # sched: [1:1.00]
; GENERIC-NEXT:    vpbroadcastq (%rdi), %ymm1 # sched: [4:0.50]
; GENERIC-NEXT:    vpaddq %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT:    retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pbroadcastq_ymm:
; HASWELL:       # BB#0:
; HASWELL-NEXT:    vpbroadcastq %xmm0, %ymm0 # sched: [3:1.00]
; HASWELL-NEXT:    vpbroadcastq (%rdi), %ymm1 # sched: [1:0.50]
; HASWELL-NEXT:    vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT:    retq # sched: [2:1.00]
;
; SKYLAKE-LABEL: test_pbroadcastq_ymm:
; SKYLAKE:       # BB#0:
; SKYLAKE-NEXT:    vpbroadcastq %xmm0, %ymm0 # sched: [3:1.00]
; SKYLAKE-NEXT:    vpbroadcastq (%rdi), %ymm1 # sched: [1:0.50]
; SKYLAKE-NEXT:    vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; SKYLAKE-NEXT:    retq # sched: [2:1.00]
;
; ZNVER1-LABEL: test_pbroadcastq_ymm:
; ZNVER1:       # BB#0:
; ZNVER1-NEXT:    vpbroadcastq (%rdi), %ymm1 # sched: [8:0.50]
; ZNVER1-NEXT:    vpbroadcastq %xmm0, %ymm0 # sched: [2:0.25]
; ZNVER1-NEXT:    vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT:    retq # sched: [1:0.50]
  %1 = shufflevector <4 x i64> %a0, <4 x i64> undef, <4 x i32> zeroinitializer
  %2 = load <4 x i64>, <4 x i64> *%a1, align 32
  %3 = shufflevector <4 x i64> %2, <4 x i64> undef, <4 x i32> zeroinitializer
  %4 = add <4 x i64> %1, %3
  ret <4 x i64> %4
}

; Checks printed scheduling info for VPBROADCASTW xmm (register and load forms)
; plus VPADDW.
define <8 x i16> @test_pbroadcastw(<8 x i16> %a0, <8 x i16> *%a1) {
; GENERIC-LABEL: test_pbroadcastw:
; GENERIC:       # BB#0:
; GENERIC-NEXT:    vpbroadcastw %xmm0, %xmm0 # sched: [1:1.00]
; GENERIC-NEXT:    vpbroadcastw (%rdi), %xmm1 # sched: [4:0.50]
; GENERIC-NEXT:    vpaddw %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; GENERIC-NEXT:    retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pbroadcastw:
; HASWELL:       # BB#0:
; HASWELL-NEXT:    vpbroadcastw %xmm0, %xmm0 # sched: [3:1.00]
; HASWELL-NEXT:    vpbroadcastw (%rdi), %xmm1 # sched: [4:1.00]
; HASWELL-NEXT:    vpaddw %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; HASWELL-NEXT:    retq # sched: [2:1.00]
;
; SKYLAKE-LABEL: test_pbroadcastw:
; SKYLAKE:       # BB#0:
; SKYLAKE-NEXT:    vpbroadcastw %xmm0, %xmm0 # sched: [3:1.00]
; SKYLAKE-NEXT:    vpbroadcastw (%rdi), %xmm1 # sched: [4:1.00]
; SKYLAKE-NEXT:    vpaddw %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; SKYLAKE-NEXT:    retq # sched: [2:1.00]
;
; ZNVER1-LABEL: test_pbroadcastw:
; ZNVER1:       # BB#0:
; ZNVER1-NEXT:    vpbroadcastw (%rdi), %xmm1 # sched: [8:1.00]
; ZNVER1-NEXT:    vpbroadcastw %xmm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT:    vpaddw %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT:    retq # sched: [1:0.50]
  %1 = shufflevector <8 x i16> %a0, <8 x i16> undef, <8 x i32> zeroinitializer
  %2 = load <8 x i16>, <8 x i16> *%a1, align 16
  %3 = shufflevector <8 x i16> %2, <8 x i16> undef, <8 x i32> zeroinitializer
  %4 = add <8 x i16> %1, %3
  ret <8 x i16> %4
}

; Checks printed scheduling info for VPBROADCASTW ymm (register and load forms)
; plus VPADDW ymm.
define <16 x i16> @test_pbroadcastw_ymm(<16 x i16> %a0, <16 x i16> *%a1) {
; GENERIC-LABEL: test_pbroadcastw_ymm:
; GENERIC:       # BB#0:
; GENERIC-NEXT:    vpbroadcastw %xmm0, %ymm0 # sched: [1:1.00]
; GENERIC-NEXT:    vpbroadcastw (%rdi), %ymm1 # sched: [4:0.50]
; GENERIC-NEXT:    vpaddw %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT:    retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pbroadcastw_ymm:
; HASWELL:       # BB#0:
; HASWELL-NEXT:    vpbroadcastw %xmm0, %ymm0 # sched: [3:1.00]
; HASWELL-NEXT:    vpbroadcastw (%rdi), %ymm1 # sched: [4:1.00]
; HASWELL-NEXT:    vpaddw %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT:    retq # sched: [2:1.00]
;
; SKYLAKE-LABEL: test_pbroadcastw_ymm:
; SKYLAKE:       # BB#0:
; SKYLAKE-NEXT:    vpbroadcastw %xmm0, %ymm0 # sched: [3:1.00]
; SKYLAKE-NEXT:    vpbroadcastw (%rdi), %ymm1 # sched: [4:1.00]
; SKYLAKE-NEXT:    vpaddw %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; SKYLAKE-NEXT:    retq # sched: [2:1.00]
;
; ZNVER1-LABEL: test_pbroadcastw_ymm:
; ZNVER1:       # BB#0:
; ZNVER1-NEXT:    vpbroadcastw (%rdi), %ymm1 # sched: [8:2.00]
; ZNVER1-NEXT:    vpbroadcastw %xmm0, %ymm0 # sched: [2:0.25]
; ZNVER1-NEXT:    vpaddw %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT:    retq # sched: [1:0.50]
  %1 = shufflevector <16 x i16> %a0, <16 x i16> undef, <16 x i32> zeroinitializer
  %2 = load <16 x i16>, <16 x i16> *%a1, align 32
  %3 = shufflevector <16 x i16> %2, <16 x i16> undef, <16 x i32> zeroinitializer
  %4 = add <16 x i16> %1, %3
  ret <16 x i16> %4
}

; Checks printed scheduling info for VPCMPEQB ymm (register and load forms).
define <32 x i8> @test_pcmpeqb(<32 x i8> %a0, <32 x i8> %a1, <32 x i8> *%a2) {
; GENERIC-LABEL: test_pcmpeqb:
; GENERIC:       # BB#0:
; GENERIC-NEXT:    vpcmpeqb %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT:    vpcmpeqb (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
; GENERIC-NEXT:    retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pcmpeqb:
; HASWELL:       # BB#0:
; HASWELL-NEXT:    vpcmpeqb %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT:    vpcmpeqb (%rdi), %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT:    retq # sched: [2:1.00]
;
; SKYLAKE-LABEL: test_pcmpeqb:
; SKYLAKE:       # BB#0:
; SKYLAKE-NEXT:    vpcmpeqb %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; SKYLAKE-NEXT:    vpcmpeqb (%rdi), %ymm0, %ymm0 # sched: [1:0.50]
; SKYLAKE-NEXT:    retq # sched: [2:1.00]
;
; ZNVER1-LABEL: test_pcmpeqb:
; ZNVER1:       # BB#0:
; ZNVER1-NEXT:    vpcmpeqb %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT:    vpcmpeqb (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; ZNVER1-NEXT:    retq # sched: [1:0.50]
  %1 = icmp eq <32 x i8> %a0, %a1
  %2 = sext <32 x i1> %1 to <32 x i8>
  %3 = load <32 x i8>, <32 x i8> *%a2, align 32
  %4 = icmp eq <32 x i8> %2, %3
  %5 = sext <32 x i1> %4 to <32 x i8>
  ret <32 x i8> %5
}

; Checks printed scheduling info for VPCMPEQD ymm (register and load forms).
define <8 x i32> @test_pcmpeqd(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> *%a2) {
; GENERIC-LABEL: test_pcmpeqd:
; GENERIC:       # BB#0:
; GENERIC-NEXT:    vpcmpeqd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT:    vpcmpeqd (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
; GENERIC-NEXT:    retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pcmpeqd:
; HASWELL:       # BB#0:
; HASWELL-NEXT:    vpcmpeqd %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT:    vpcmpeqd (%rdi), %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT:    retq # sched: [2:1.00]
;
; SKYLAKE-LABEL: test_pcmpeqd:
; SKYLAKE:       # BB#0:
; SKYLAKE-NEXT:    vpcmpeqd %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; SKYLAKE-NEXT:    vpcmpeqd (%rdi), %ymm0, %ymm0 # sched: [1:0.50]
; SKYLAKE-NEXT:    retq # sched: [2:1.00]
;
; ZNVER1-LABEL: test_pcmpeqd:
; ZNVER1:       # BB#0:
; ZNVER1-NEXT:    vpcmpeqd %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT:    vpcmpeqd (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; ZNVER1-NEXT:    retq # sched: [1:0.50]
  %1 = icmp eq <8 x i32> %a0, %a1
  %2 = sext <8 x i1> %1 to <8 x i32>
  %3 = load <8 x i32>, <8 x i32> *%a2, align 32
  %4 = icmp eq <8 x i32> %2, %3
  %5 = sext <8 x i1> %4 to <8 x i32>
  ret <8 x i32> %5
}

; Checks printed scheduling info for VPCMPEQQ ymm (register and load forms).
define <4 x i64> @test_pcmpeqq(<4 x i64> %a0, <4 x i64> %a1, <4 x i64> *%a2) {
; GENERIC-LABEL: test_pcmpeqq:
; GENERIC:       # BB#0:
; GENERIC-NEXT:    vpcmpeqq %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT:    vpcmpeqq (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
; GENERIC-NEXT:    retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pcmpeqq:
; HASWELL:       # BB#0:
; HASWELL-NEXT:    vpcmpeqq %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT:    vpcmpeqq (%rdi), %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT:    retq # sched: [2:1.00]
;
; SKYLAKE-LABEL: test_pcmpeqq:
; SKYLAKE:       # BB#0:
; SKYLAKE-NEXT:    vpcmpeqq %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; SKYLAKE-NEXT:    vpcmpeqq (%rdi), %ymm0, %ymm0 # sched: [1:0.50]
; SKYLAKE-NEXT:    retq # sched: [2:1.00]
;
; ZNVER1-LABEL: test_pcmpeqq:
; ZNVER1:       # BB#0:
; ZNVER1-NEXT:    vpcmpeqq %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT:    vpcmpeqq (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; ZNVER1-NEXT:    retq # sched: [1:0.50]
  %1 = icmp eq <4 x i64> %a0, %a1
  %2 = sext <4 x i1> %1 to <4 x i64>
  %3 = load <4 x i64>, <4 x i64> *%a2, align 32
  %4 = icmp eq <4 x i64> %2, %3
  %5 = sext <4 x i1> %4 to <4 x i64>
  ret <4 x i64> %5
}

; Checks printed scheduling info for VPCMPEQW ymm (register and load forms).
define <16 x i16> @test_pcmpeqw(<16 x i16> %a0, <16 x i16> %a1, <16 x i16> *%a2) {
; GENERIC-LABEL: test_pcmpeqw:
; GENERIC:       # BB#0:
; GENERIC-NEXT:    vpcmpeqw %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT:    vpcmpeqw (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
; GENERIC-NEXT:    retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pcmpeqw:
; HASWELL:       # BB#0:
; HASWELL-NEXT:    vpcmpeqw %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT:    vpcmpeqw (%rdi), %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT:    retq # sched: [2:1.00]
;
; SKYLAKE-LABEL: test_pcmpeqw:
; SKYLAKE:       # BB#0:
; SKYLAKE-NEXT:    vpcmpeqw %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; SKYLAKE-NEXT:    vpcmpeqw (%rdi), %ymm0, %ymm0 # sched: [1:0.50]
; SKYLAKE-NEXT:    retq # sched: [2:1.00]
;
; ZNVER1-LABEL: test_pcmpeqw:
; ZNVER1:       # BB#0:
; ZNVER1-NEXT:    vpcmpeqw %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT:    vpcmpeqw (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; ZNVER1-NEXT:    retq # sched: [1:0.50]
  %1 = icmp eq <16 x i16> %a0, %a1
  %2 = sext <16 x i1> %1 to <16 x i16>
  %3 = load <16 x i16>, <16 x i16> *%a2, align 32
  %4 = icmp eq <16 x i16> %2, %3
  %5 = sext <16 x i1> %4 to <16 x i16>
  ret <16 x i16> %5
}

; Checks printed scheduling info for VPCMPGTB ymm (register and load forms).
define <32 x i8> @test_pcmpgtb(<32 x i8> %a0, <32 x i8> %a1, <32 x i8> *%a2) {
; GENERIC-LABEL: test_pcmpgtb:
; GENERIC:       # BB#0:
; GENERIC-NEXT:    vpcmpgtb %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT:    vpcmpgtb (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
; GENERIC-NEXT:    retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pcmpgtb:
; HASWELL:       # BB#0:
; HASWELL-NEXT:    vpcmpgtb %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT:    vpcmpgtb (%rdi), %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT:    retq # sched: [2:1.00]
;
; SKYLAKE-LABEL: test_pcmpgtb:
; SKYLAKE:       # BB#0:
; SKYLAKE-NEXT:    vpcmpgtb %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; SKYLAKE-NEXT:    vpcmpgtb (%rdi), %ymm0, %ymm0 # sched: [1:0.50]
; SKYLAKE-NEXT:    retq # sched: [2:1.00]
;
; ZNVER1-LABEL: test_pcmpgtb:
; ZNVER1:       # BB#0:
; ZNVER1-NEXT:    vpcmpgtb %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT:    vpcmpgtb (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; ZNVER1-NEXT:    retq # sched: [1:0.50]
  %1 = icmp sgt <32 x i8> %a0, %a1
  %2 = sext <32 x i1> %1 to <32 x i8>
  %3 = load <32 x i8>, <32 x i8> *%a2, align 32
  %4 = icmp sgt <32 x i8> %2, %3
  %5 = sext <32 x i1> %4 to <32 x i8>
  ret <32 x i8> %5
}

; Checks printed scheduling info for VPCMPGTD ymm (register and load forms).
define <8 x i32> @test_pcmpgtd(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> *%a2) {
; GENERIC-LABEL: test_pcmpgtd:
; GENERIC:       # BB#0:
; GENERIC-NEXT:    vpcmpgtd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT:    vpcmpgtd (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
; GENERIC-NEXT:    retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pcmpgtd:
; HASWELL:       # BB#0:
; HASWELL-NEXT:    vpcmpgtd %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT:    vpcmpgtd (%rdi), %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT:    retq # sched: [2:1.00]
;
; SKYLAKE-LABEL: test_pcmpgtd:
; SKYLAKE:       # BB#0:
; SKYLAKE-NEXT:    vpcmpgtd %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; SKYLAKE-NEXT:    vpcmpgtd (%rdi), %ymm0, %ymm0 # sched: [1:0.50]
; SKYLAKE-NEXT:    retq # sched: [2:1.00]
;
; ZNVER1-LABEL: test_pcmpgtd:
; ZNVER1:       # BB#0:
; ZNVER1-NEXT:    vpcmpgtd %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT:    vpcmpgtd (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; ZNVER1-NEXT:    retq # sched: [1:0.50]
  %1 = icmp sgt <8 x i32> %a0, %a1
  %2 = sext <8 x i1> %1 to <8 x i32>
  %3 = load <8 x i32>, <8 x i32> *%a2, align 32
  %4 = icmp sgt <8 x i32> %2, %3
  %5 = sext <8 x i1> %4 to <8 x i32>
  ret <8 x i32> %5
}

; Checks printed scheduling info for VPCMPGTQ ymm (register and load forms).
; Note: higher latency than the byte/word/dword compares on HSW/SKL ([5:1.00]).
define <4 x i64> @test_pcmpgtq(<4 x i64> %a0, <4 x i64> %a1, <4 x i64> *%a2) {
; GENERIC-LABEL: test_pcmpgtq:
; GENERIC:       # BB#0:
; GENERIC-NEXT:    vpcmpgtq %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT:    vpcmpgtq (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
; GENERIC-NEXT:    retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pcmpgtq:
; HASWELL:       # BB#0:
; HASWELL-NEXT:    vpcmpgtq %ymm1, %ymm0, %ymm0 # sched: [5:1.00]
; HASWELL-NEXT:    vpcmpgtq (%rdi), %ymm0, %ymm0 # sched: [5:1.00]
; HASWELL-NEXT:    retq # sched: [2:1.00]
;
; SKYLAKE-LABEL: test_pcmpgtq:
; SKYLAKE:       # BB#0:
; SKYLAKE-NEXT:    vpcmpgtq %ymm1, %ymm0, %ymm0 # sched: [5:1.00]
; SKYLAKE-NEXT:    vpcmpgtq (%rdi), %ymm0, %ymm0 # sched: [5:1.00]
; SKYLAKE-NEXT:    retq # sched: [2:1.00]
;
; ZNVER1-LABEL: test_pcmpgtq:
; ZNVER1:       # BB#0:
; ZNVER1-NEXT:    vpcmpgtq %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; ZNVER1-NEXT:    vpcmpgtq (%rdi), %ymm0, %ymm0 # sched: [8:1.00]
; ZNVER1-NEXT:    retq # sched: [1:0.50]
  %1 = icmp sgt <4 x i64> %a0, %a1
  %2 = sext <4 x i1> %1 to <4 x i64>
  %3 = load <4 x i64>, <4 x i64> *%a2, align 32
  %4 = icmp sgt <4 x i64> %2, %3
  %5 = sext <4 x i1> %4 to <4 x i64>
  ret <4 x i64> %5
}

; Checks printed scheduling info for VPCMPGTW ymm (register and load forms).
define <16 x i16> @test_pcmpgtw(<16 x i16> %a0, <16 x i16> %a1, <16 x i16> *%a2) {
; GENERIC-LABEL: test_pcmpgtw:
; GENERIC:       # BB#0:
; GENERIC-NEXT:    vpcmpgtw %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT:    vpcmpgtw (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
; GENERIC-NEXT:    retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pcmpgtw:
; HASWELL:       # BB#0:
; HASWELL-NEXT:    vpcmpgtw %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT:    vpcmpgtw (%rdi), %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT:    retq # sched: [2:1.00]
;
; SKYLAKE-LABEL: test_pcmpgtw:
; SKYLAKE:       # BB#0:
; SKYLAKE-NEXT:    vpcmpgtw %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; SKYLAKE-NEXT:    vpcmpgtw (%rdi), %ymm0, %ymm0 # sched: [1:0.50]
; SKYLAKE-NEXT:    retq # sched: [2:1.00]
;
; ZNVER1-LABEL: test_pcmpgtw:
; ZNVER1:       # BB#0:
; ZNVER1-NEXT:    vpcmpgtw %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT:    vpcmpgtw (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; ZNVER1-NEXT:    retq # sched: [1:0.50]
  %1 = icmp sgt <16 x i16> %a0, %a1
  %2 = sext <16 x i1> %1 to <16 x i16>
  %3 = load <16 x i16>, <16 x i16> *%a2, align 32
  %4 = icmp sgt <16 x i16> %2, %3
  %5 = sext <16 x i1> %4 to <16 x i16>
  ret <16 x i16> %5
}

; Checks printed scheduling info for VPERM2I128 (register and load forms)
; plus VPADDQ ymm.
define <4 x i64> @test_perm2i128(<4 x i64> %a0, <4 x i64> %a1, <4 x i64> *%a2) {
; GENERIC-LABEL: test_perm2i128:
; GENERIC:       # BB#0:
; GENERIC-NEXT:    vperm2i128 {{.*#+}} ymm1 = ymm0[2,3],ymm1[0,1] sched: [1:1.00]
; GENERIC-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],mem[0,1] sched: [5:1.00]
; GENERIC-NEXT:    vpaddq %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT:    retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_perm2i128:
; HASWELL:       # BB#0:
; HASWELL-NEXT:    vperm2i128 {{.*#+}} ymm1 = ymm0[2,3],ymm1[0,1] sched: [3:1.00]
; HASWELL-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],mem[0,1] sched: [3:1.00]
; HASWELL-NEXT:    vpaddq %ymm0, %ymm1, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT:    retq # sched: [2:1.00]
;
; SKYLAKE-LABEL: test_perm2i128:
; SKYLAKE:       # BB#0:
; SKYLAKE-NEXT:    vperm2i128 {{.*#+}} ymm1 = ymm0[2,3],ymm1[0,1] sched: [3:1.00]
; SKYLAKE-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],mem[0,1] sched: [3:1.00]
; SKYLAKE-NEXT:    vpaddq %ymm0, %ymm1, %ymm0 # sched: [1:0.50]
; SKYLAKE-NEXT:    retq # sched: [2:1.00]
;
; ZNVER1-LABEL: test_perm2i128:
; ZNVER1:       # BB#0:
; ZNVER1-NEXT:    vperm2i128 {{.*#+}} ymm1 = ymm0[2,3],ymm1[0,1] sched: [2:0.25]
; ZNVER1-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],mem[0,1] sched: [9:0.50]
; ZNVER1-NEXT:    vpaddq %ymm0, %ymm1, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT:    retq # sched: [1:0.50]
  %1 = shufflevector <4 x i64> %a0, <4 x i64> %a1, <4 x i32> <i32 2, i32 3, i32 4, i32 5>
  %2 = load <4 x i64>, <4 x i64> *%a2, align 32
  %3 = shufflevector <4 x i64> %a0, <4 x i64> %2, <4 x i32> <i32 2, i32 3, i32 4, i32 5>
  %4 = add <4 x i64> %1, %3
  ret <4 x i64> %4
}

; Checks printed scheduling info for VPERMD (register and load forms) plus
; VPADDD ymm, via the llvm.x86.avx2.permd intrinsic.
define <8 x i32> @test_permd(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> *%a2) {
; GENERIC-LABEL: test_permd:
; GENERIC:       # BB#0:
; GENERIC-NEXT:    vpermd %ymm1, %ymm0, %ymm1 # sched: [1:1.00]
; GENERIC-NEXT:    vpermd (%rdi), %ymm0, %ymm0 # sched: [5:1.00]
; GENERIC-NEXT:    vpaddd %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT:    retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_permd:
; HASWELL:       # BB#0:
; HASWELL-NEXT:    vpermd %ymm1, %ymm0, %ymm1 # sched: [3:1.00]
; HASWELL-NEXT:    vpermd (%rdi), %ymm0, %ymm0 # sched: [3:1.00]
; HASWELL-NEXT:    vpaddd %ymm0, %ymm1, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT:    retq # sched: [2:1.00]
;
; SKYLAKE-LABEL: test_permd:
; SKYLAKE:       # BB#0:
; SKYLAKE-NEXT:    vpermd %ymm1, %ymm0, %ymm1 # sched: [3:1.00]
; SKYLAKE-NEXT:    vpermd (%rdi), %ymm0, %ymm0 # sched: [3:1.00]
; SKYLAKE-NEXT:    vpaddd %ymm0, %ymm1, %ymm0 # sched: [1:0.50]
; SKYLAKE-NEXT:    retq # sched: [2:1.00]
;
; ZNVER1-LABEL: test_permd:
; ZNVER1:       # BB#0:
; ZNVER1-NEXT:    vpermd %ymm1, %ymm0, %ymm1 # sched: [2:0.25]
; ZNVER1-NEXT:    vpermd (%rdi), %ymm0, %ymm0 # sched: [9:0.50]
; ZNVER1-NEXT:    vpaddd %ymm0, %ymm1, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT:    retq # sched: [1:0.50]
  %1 = call <8 x i32> @llvm.x86.avx2.permd(<8 x i32> %a1, <8 x i32> %a0)
  %2 = load <8 x i32>, <8 x i32> *%a2, align 32
  %3 = call <8 x i32> @llvm.x86.avx2.permd(<8 x i32> %2, <8 x i32> %a0)
  %4 = add <8 x i32> %1, %3
  ret <8 x i32> %4
}
declare <8 x i32> @llvm.x86.avx2.permd(<8 x i32>, <8 x i32>) nounwind readonly

; Checks printed scheduling info for VPERMPD (register and load forms) plus
; VADDPD ymm.
define <4 x double> @test_permpd(<4 x double> %a0, <4 x double> *%a1) {
; GENERIC-LABEL: test_permpd:
; GENERIC:       # BB#0:
; GENERIC-NEXT:    vpermpd {{.*#+}} ymm0 = ymm0[3,2,2,3] sched: [1:1.00]
; GENERIC-NEXT:    vpermpd {{.*#+}} ymm1 = mem[0,2,2,3] sched: [5:1.00]
; GENERIC-NEXT:    vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT:    retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_permpd:
; HASWELL:       # BB#0:
; HASWELL-NEXT:    vpermpd {{.*#+}} ymm0 = ymm0[3,2,2,3] sched: [3:1.00]
; HASWELL-NEXT:    vpermpd {{.*#+}} ymm1 = mem[0,2,2,3] sched: [3:1.00]
; HASWELL-NEXT:    vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; HASWELL-NEXT:    retq # sched: [2:1.00]
;
; SKYLAKE-LABEL: test_permpd:
; SKYLAKE:       # BB#0:
; SKYLAKE-NEXT:    vpermpd {{.*#+}} ymm0 = ymm0[3,2,2,3] sched: [3:1.00]
; SKYLAKE-NEXT:    vpermpd {{.*#+}} ymm1 = mem[0,2,2,3] sched: [3:1.00]
; SKYLAKE-NEXT:    vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; SKYLAKE-NEXT:    retq # sched: [2:1.00]
;
; ZNVER1-LABEL: test_permpd:
; ZNVER1:       # BB#0:
; ZNVER1-NEXT:    vpermpd {{.*#+}} ymm1 = mem[0,2,2,3] sched: [107:0.50]
; ZNVER1-NEXT:    vpermpd {{.*#+}} ymm0 = ymm0[3,2,2,3] sched: [100:0.25]
; ZNVER1-NEXT:    vaddpd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; ZNVER1-NEXT:    retq # sched: [1:0.50]
  %1 = shufflevector <4 x double> %a0, <4 x double> undef, <4 x i32> <i32 3, i32 2, i32 2, i32 3>
  %2 = load <4 x double>, <4 x double> *%a1, align 32
  %3 = shufflevector <4 x double> %2, <4 x double> undef, <4 x i32> <i32 0, i32 2, i32 2, i32 3>
  %4 = fadd <4 x double> %1, %3
  ret <4 x double> %4
}

; Checks printed scheduling info for VPERMPS (register and load forms) plus
; VADDPS ymm, via the llvm.x86.avx2.permps intrinsic.
define <8 x float> @test_permps(<8 x i32> %a0, <8 x float> %a1, <8 x float> *%a2) {
; GENERIC-LABEL: test_permps:
; GENERIC:       # BB#0:
; GENERIC-NEXT:    vpermps %ymm1, %ymm0, %ymm1 # sched: [1:1.00]
; GENERIC-NEXT:    vpermps (%rdi), %ymm0, %ymm0 # sched: [5:1.00]
; GENERIC-NEXT:    vaddps %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT:    retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_permps:
; HASWELL:       # BB#0:
; HASWELL-NEXT:    vpermps %ymm1, %ymm0, %ymm1 # sched: [3:1.00]
; HASWELL-NEXT:    vpermps (%rdi), %ymm0, %ymm0 # sched: [3:1.00]
; HASWELL-NEXT:    vaddps %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
; HASWELL-NEXT:    retq # sched: [2:1.00]
;
; SKYLAKE-LABEL: test_permps:
; SKYLAKE:       # BB#0:
; SKYLAKE-NEXT:    vpermps %ymm1, %ymm0, %ymm1 # sched: [3:1.00]
; SKYLAKE-NEXT:    vpermps (%rdi), %ymm0, %ymm0 # sched: [3:1.00]
; SKYLAKE-NEXT:    vaddps %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
; SKYLAKE-NEXT:    retq # sched: [2:1.00]
;
; ZNVER1-LABEL: test_permps:
; ZNVER1:       # BB#0:
; ZNVER1-NEXT:    vpermps %ymm1, %ymm0, %ymm1 # sched: [100:0.25]
; ZNVER1-NEXT:    vpermps (%rdi), %ymm0, %ymm0 # sched: [107:0.50]
; ZNVER1-NEXT:    vaddps %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
; ZNVER1-NEXT:    retq # sched: [1:0.50]
  %1 = call <8 x float> @llvm.x86.avx2.permps(<8 x float> %a1, <8 x i32> %a0)
  %2 = load <8 x float>, <8 x float> *%a2, align 32
  %3 = call <8 x float> @llvm.x86.avx2.permps(<8 x float> %2, <8 x i32> %a0)
  %4 = fadd <8 x float> %1, %3
  ret <8 x float> %4
}
declare <8 x float> @llvm.x86.avx2.permps(<8 x float>, <8 x i32>) nounwind readonly

; Checks printed scheduling info for VPERMQ (register and load forms) plus
; VPADDQ ymm.
define <4 x i64> @test_permq(<4 x i64> %a0, <4 x i64> *%a1) {
; GENERIC-LABEL: test_permq:
; GENERIC:       # BB#0:
; GENERIC-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[3,2,2,3] sched: [1:1.00]
; GENERIC-NEXT:    vpermq {{.*#+}} ymm1 = mem[0,2,2,3] sched: [5:1.00]
; GENERIC-NEXT:    vpaddq %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT:    retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_permq:
; HASWELL:       # BB#0:
; HASWELL-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[3,2,2,3] sched: [3:1.00]
; HASWELL-NEXT:    vpermq {{.*#+}} ymm1 = mem[0,2,2,3] sched: [3:1.00]
; HASWELL-NEXT:    vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT:    retq # sched: [2:1.00]
;
; SKYLAKE-LABEL: test_permq:
; SKYLAKE:       # BB#0:
; SKYLAKE-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[3,2,2,3] sched: [3:1.00]
; SKYLAKE-NEXT:    vpermq {{.*#+}} ymm1 = mem[0,2,2,3] sched: [3:1.00]
; SKYLAKE-NEXT:    vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; SKYLAKE-NEXT:    retq # sched: [2:1.00]
;
; ZNVER1-LABEL: test_permq:
; ZNVER1:       # BB#0:
; ZNVER1-NEXT:    vpermq {{.*#+}} ymm1 = mem[0,2,2,3] sched: [9:0.50]
; ZNVER1-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[3,2,2,3] sched: [2:0.25]
; ZNVER1-NEXT:    vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT:    retq # sched: [1:0.50]
  %1 = shufflevector <4 x i64> %a0, <4 x i64> undef, <4 x i32> <i32 3, i32 2, i32 2, i32 3>
  %2 = load <4 x i64>, <4 x i64> *%a1, align 32
  %3 = shufflevector <4 x i64> %2, <4 x i64> undef, <4 x i32> <i32 0, i32 2, i32 2, i32 3>
  %4 = add <4 x i64> %1, %3
  ret <4 x i64> %4
}

; Checks printed scheduling info for VPGATHERDD xmm. Gather latencies are
; variable, so the autogenerated checks use [lat:?] where throughput is unknown.
define <4 x i32> @test_pgatherdd(<4 x i32> %a0, i8* %a1, <4 x i32> %a2, <4 x i32> %a3) {
; GENERIC-LABEL: test_pgatherdd:
; GENERIC:       # BB#0:
; GENERIC-NEXT:    vpgatherdd %xmm2, (%rdi,%xmm1,2), %xmm0
; GENERIC-NEXT:    retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pgatherdd:
; HASWELL:       # BB#0:
; HASWELL-NEXT:    vpgatherdd %xmm2, (%rdi,%xmm1,2), %xmm0 # sched: [1:?]
; HASWELL-NEXT:    retq # sched: [2:1.00]
;
; SKYLAKE-LABEL: test_pgatherdd:
; SKYLAKE:       # BB#0:
; SKYLAKE-NEXT:    vpgatherdd %xmm2, (%rdi,%xmm1,2), %xmm0 # sched: [1:?]
; SKYLAKE-NEXT:    retq # sched: [2:1.00]
;
; ZNVER1-LABEL: test_pgatherdd:
; ZNVER1:       # BB#0:
; ZNVER1-NEXT:    vpgatherdd %xmm2, (%rdi,%xmm1,2), %xmm0 # sched: [100:?]
; ZNVER1-NEXT:    retq # sched: [1:0.50]
  %1 = call <4 x i32> @llvm.x86.avx2.gather.d.d(<4 x i32> %a0, i8* %a1, <4 x i32> %a2, <4 x i32> %a3, i8 2)
  ret <4 x i32> %1
}
declare <4 x i32> @llvm.x86.avx2.gather.d.d(<4 x i32>, i8*, <4 x i32>, <4 x i32>, i8) nounwind readonly

; Checks printed scheduling info for VPGATHERDD ymm.
define <8 x i32> @test_pgatherdd_ymm(<8 x i32> %a0, i8* %a1, <8 x i32> %a2, <8 x i32> %a3) {
; GENERIC-LABEL: test_pgatherdd_ymm:
; GENERIC:       # BB#0:
; GENERIC-NEXT:    vpgatherdd %ymm2, (%rdi,%ymm1,2), %ymm0
; GENERIC-NEXT:    retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pgatherdd_ymm:
; HASWELL:       # BB#0:
; HASWELL-NEXT:    vpgatherdd %ymm2, (%rdi,%ymm1,2), %ymm0 # sched: [1:?]
; HASWELL-NEXT:    retq # sched: [2:1.00]
;
; SKYLAKE-LABEL: test_pgatherdd_ymm:
; SKYLAKE:       # BB#0:
; SKYLAKE-NEXT:    vpgatherdd %ymm2, (%rdi,%ymm1,2), %ymm0 # sched: [1:?]
; SKYLAKE-NEXT:    retq # sched: [2:1.00]
;
; ZNVER1-LABEL: test_pgatherdd_ymm:
; ZNVER1:       # BB#0:
; ZNVER1-NEXT:    vpgatherdd %ymm2, (%rdi,%ymm1,2), %ymm0 # sched: [100:?]
; ZNVER1-NEXT:    retq # sched: [1:0.50]
  %1 = call <8 x i32> @llvm.x86.avx2.gather.d.d.256(<8 x i32> %a0, i8* %a1, <8 x i32> %a2, <8 x i32> %a3, i8 2)
  ret <8 x i32> %1
}
declare <8 x i32> @llvm.x86.avx2.gather.d.d.256(<8 x i32>, i8*, <8 x i32>, <8 x i32>, i8) nounwind readonly

; Checks printed scheduling info for VPGATHERDQ xmm.
define <2 x i64> @test_pgatherdq(<2 x i64> %a0, i8* %a1, <4 x i32> %a2, <2 x i64> %a3) {
; GENERIC-LABEL: test_pgatherdq:
; GENERIC:       # BB#0:
; GENERIC-NEXT:    vpgatherdq %xmm2, (%rdi,%xmm1,2), %xmm0
; GENERIC-NEXT:    retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pgatherdq:
; HASWELL:       # BB#0:
; HASWELL-NEXT:    vpgatherdq %xmm2, (%rdi,%xmm1,2), %xmm0 # sched: [1:?]
; HASWELL-NEXT:    retq # sched: [2:1.00]
;
; SKYLAKE-LABEL: test_pgatherdq:
; SKYLAKE:       # BB#0:
; SKYLAKE-NEXT:    vpgatherdq %xmm2, (%rdi,%xmm1,2), %xmm0 # sched: [1:?]
; SKYLAKE-NEXT:    retq # sched: [2:1.00]
;
; ZNVER1-LABEL: test_pgatherdq:
; ZNVER1:       # BB#0:
; ZNVER1-NEXT:    vpgatherdq %xmm2, (%rdi,%xmm1,2), %xmm0 # sched: [100:?]
; ZNVER1-NEXT:    retq # sched: [1:0.50]
  %1 = call <2 x i64> @llvm.x86.avx2.gather.d.q(<2 x i64> %a0, i8* %a1, <4 x i32> %a2, <2 x i64> %a3, i8 2)
  ret <2 x i64> %1
}
declare <2 x i64> @llvm.x86.avx2.gather.d.q(<2 x i64>, i8*, <4 x i32>, <2 x i64>, i8) nounwind readonly

; Scheduling-info test for VPGATHERDQ (ymm destination, xmm index register).
define <4 x i64> @test_pgatherdq_ymm(<4 x i64> %a0, i8* %a1, <4 x i32> %a2, <4 x i64> %a3) {
; GENERIC-LABEL: test_pgatherdq_ymm:
; GENERIC: # BB#0:
; GENERIC-NEXT: vpgatherdq %ymm2, (%rdi,%xmm1,2), %ymm0
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pgatherdq_ymm:
; HASWELL: # BB#0:
; HASWELL-NEXT: vpgatherdq %ymm2, (%rdi,%xmm1,2), %ymm0 # sched: [1:?]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; SKYLAKE-LABEL: test_pgatherdq_ymm:
; SKYLAKE: # BB#0:
; SKYLAKE-NEXT: vpgatherdq %ymm2, (%rdi,%xmm1,2), %ymm0 # sched: [1:?]
; SKYLAKE-NEXT: retq # sched: [2:1.00]
;
; ZNVER1-LABEL: test_pgatherdq_ymm:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vpgatherdq %ymm2, (%rdi,%xmm1,2), %ymm0 # sched: [100:?]
; ZNVER1-NEXT: retq # sched: [1:0.50]
 %1 = call <4 x i64> @llvm.x86.avx2.gather.d.q.256(<4 x i64> %a0, i8* %a1, <4 x i32> %a2, <4 x i64> %a3, i8 2)
 ret <4 x i64> %1
}
declare <4 x i64> @llvm.x86.avx2.gather.d.q.256(<4 x i64>, i8*, <4 x i32>, <4 x i64>, i8) nounwind readonly
2071
; Scheduling-info test for VPGATHERQD (xmm): qword indices gathering dword elements.
define <4 x i32> @test_pgatherqd(<4 x i32> %a0, i8* %a1, <2 x i64> %a2, <4 x i32> %a3) {
; GENERIC-LABEL: test_pgatherqd:
; GENERIC: # BB#0:
; GENERIC-NEXT: vpgatherqd %xmm2, (%rdi,%xmm1,2), %xmm0
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pgatherqd:
; HASWELL: # BB#0:
; HASWELL-NEXT: vpgatherqd %xmm2, (%rdi,%xmm1,2), %xmm0 # sched: [1:?]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; SKYLAKE-LABEL: test_pgatherqd:
; SKYLAKE: # BB#0:
; SKYLAKE-NEXT: vpgatherqd %xmm2, (%rdi,%xmm1,2), %xmm0 # sched: [1:?]
; SKYLAKE-NEXT: retq # sched: [2:1.00]
;
; ZNVER1-LABEL: test_pgatherqd:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vpgatherqd %xmm2, (%rdi,%xmm1,2), %xmm0 # sched: [100:?]
; ZNVER1-NEXT: retq # sched: [1:0.50]
 %1 = call <4 x i32> @llvm.x86.avx2.gather.q.d(<4 x i32> %a0, i8* %a1, <2 x i64> %a2, <4 x i32> %a3, i8 2)
 ret <4 x i32> %1
}
declare <4 x i32> @llvm.x86.avx2.gather.q.d(<4 x i32>, i8*, <2 x i64>, <4 x i32>, i8) nounwind readonly
2096
; Scheduling-info test for VPGATHERQD with ymm index; also checks the vzeroupper
; inserted because the result is xmm while a ymm register was used.
define <4 x i32> @test_pgatherqd_ymm(<4 x i32> %a0, i8* %a1, <4 x i64> %a2, <4 x i32> %a3) {
; GENERIC-LABEL: test_pgatherqd_ymm:
; GENERIC: # BB#0:
; GENERIC-NEXT: vpgatherqd %xmm2, (%rdi,%ymm1,2), %xmm0
; GENERIC-NEXT: vzeroupper
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pgatherqd_ymm:
; HASWELL: # BB#0:
; HASWELL-NEXT: vpgatherqd %xmm2, (%rdi,%ymm1,2), %xmm0 # sched: [1:?]
; HASWELL-NEXT: vzeroupper # sched: [4:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; SKYLAKE-LABEL: test_pgatherqd_ymm:
; SKYLAKE: # BB#0:
; SKYLAKE-NEXT: vpgatherqd %xmm2, (%rdi,%ymm1,2), %xmm0 # sched: [1:?]
; SKYLAKE-NEXT: vzeroupper # sched: [4:1.00]
; SKYLAKE-NEXT: retq # sched: [2:1.00]
;
; ZNVER1-LABEL: test_pgatherqd_ymm:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vpgatherqd %xmm2, (%rdi,%ymm1,2), %xmm0 # sched: [100:?]
; ZNVER1-NEXT: vzeroupper # sched: [100:?]
; ZNVER1-NEXT: retq # sched: [1:0.50]
 %1 = call <4 x i32> @llvm.x86.avx2.gather.q.d.256(<4 x i32> %a0, i8* %a1, <4 x i64> %a2, <4 x i32> %a3, i8 2)
 ret <4 x i32> %1
}
declare <4 x i32> @llvm.x86.avx2.gather.q.d.256(<4 x i32>, i8*, <4 x i64>, <4 x i32>, i8) nounwind readonly
2125
; Scheduling-info test for VPGATHERQQ (xmm).
define <2 x i64> @test_pgatherqq(<2 x i64> %a0, i8 *%a1, <2 x i64> %a2, <2 x i64> %a3) {
; GENERIC-LABEL: test_pgatherqq:
; GENERIC: # BB#0:
; GENERIC-NEXT: vpgatherqq %xmm2, (%rdi,%xmm1,2), %xmm0
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pgatherqq:
; HASWELL: # BB#0:
; HASWELL-NEXT: vpgatherqq %xmm2, (%rdi,%xmm1,2), %xmm0 # sched: [1:?]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; SKYLAKE-LABEL: test_pgatherqq:
; SKYLAKE: # BB#0:
; SKYLAKE-NEXT: vpgatherqq %xmm2, (%rdi,%xmm1,2), %xmm0 # sched: [1:?]
; SKYLAKE-NEXT: retq # sched: [2:1.00]
;
; ZNVER1-LABEL: test_pgatherqq:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vpgatherqq %xmm2, (%rdi,%xmm1,2), %xmm0 # sched: [100:?]
; ZNVER1-NEXT: retq # sched: [1:0.50]
 %1 = call <2 x i64> @llvm.x86.avx2.gather.q.q(<2 x i64> %a0, i8* %a1, <2 x i64> %a2, <2 x i64> %a3, i8 2)
 ret <2 x i64> %1
}
declare <2 x i64> @llvm.x86.avx2.gather.q.q(<2 x i64>, i8*, <2 x i64>, <2 x i64>, i8) nounwind readonly
2150
; Scheduling-info test for VPGATHERQQ (ymm).
define <4 x i64> @test_pgatherqq_ymm(<4 x i64> %a0, i8 *%a1, <4 x i64> %a2, <4 x i64> %a3) {
; GENERIC-LABEL: test_pgatherqq_ymm:
; GENERIC: # BB#0:
; GENERIC-NEXT: vpgatherqq %ymm2, (%rdi,%ymm1,2), %ymm0
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pgatherqq_ymm:
; HASWELL: # BB#0:
; HASWELL-NEXT: vpgatherqq %ymm2, (%rdi,%ymm1,2), %ymm0 # sched: [1:?]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; SKYLAKE-LABEL: test_pgatherqq_ymm:
; SKYLAKE: # BB#0:
; SKYLAKE-NEXT: vpgatherqq %ymm2, (%rdi,%ymm1,2), %ymm0 # sched: [1:?]
; SKYLAKE-NEXT: retq # sched: [2:1.00]
;
; ZNVER1-LABEL: test_pgatherqq_ymm:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vpgatherqq %ymm2, (%rdi,%ymm1,2), %ymm0 # sched: [100:?]
; ZNVER1-NEXT: retq # sched: [1:0.50]
 %1 = call <4 x i64> @llvm.x86.avx2.gather.q.q.256(<4 x i64> %a0, i8* %a1, <4 x i64> %a2, <4 x i64> %a3, i8 2)
 ret <4 x i64> %1
}
declare <4 x i64> @llvm.x86.avx2.gather.q.q.256(<4 x i64>, i8*, <4 x i64>, <4 x i64>, i8) nounwind readonly
2175
; Scheduling-info test for VPHADDD, reg-reg and reg-mem forms (result of the first
; hadd is fed into the second, whose other operand is a load).
define <8 x i32> @test_phaddd(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> *%a2) {
; GENERIC-LABEL: test_phaddd:
; GENERIC: # BB#0:
; GENERIC-NEXT: vphaddd %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; GENERIC-NEXT: vphaddd (%rdi), %ymm0, %ymm0 # sched: [5:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_phaddd:
; HASWELL: # BB#0:
; HASWELL-NEXT: vphaddd %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
; HASWELL-NEXT: vphaddd (%rdi), %ymm0, %ymm0 # sched: [3:2.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; SKYLAKE-LABEL: test_phaddd:
; SKYLAKE: # BB#0:
; SKYLAKE-NEXT: vphaddd %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
; SKYLAKE-NEXT: vphaddd (%rdi), %ymm0, %ymm0 # sched: [3:2.00]
; SKYLAKE-NEXT: retq # sched: [2:1.00]
;
; ZNVER1-LABEL: test_phaddd:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vphaddd %ymm1, %ymm0, %ymm0 # sched: [100:?]
; ZNVER1-NEXT: vphaddd (%rdi), %ymm0, %ymm0 # sched: [100:?]
; ZNVER1-NEXT: retq # sched: [1:0.50]
 %1 = call <8 x i32> @llvm.x86.avx2.phadd.d(<8 x i32> %a0, <8 x i32> %a1)
 %2 = load <8 x i32>, <8 x i32> *%a2, align 32
 %3 = call <8 x i32> @llvm.x86.avx2.phadd.d(<8 x i32> %1, <8 x i32> %2)
 ret <8 x i32> %3
}
declare <8 x i32> @llvm.x86.avx2.phadd.d(<8 x i32>, <8 x i32>) nounwind readnone
2206
; Scheduling-info test for VPHADDSW, reg-reg and reg-mem forms.
define <16 x i16> @test_phaddsw(<16 x i16> %a0, <16 x i16> %a1, <16 x i16> *%a2) {
; GENERIC-LABEL: test_phaddsw:
; GENERIC: # BB#0:
; GENERIC-NEXT: vphaddsw %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: vphaddsw (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_phaddsw:
; HASWELL: # BB#0:
; HASWELL-NEXT: vphaddsw %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
; HASWELL-NEXT: vphaddsw (%rdi), %ymm0, %ymm0 # sched: [3:2.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; SKYLAKE-LABEL: test_phaddsw:
; SKYLAKE: # BB#0:
; SKYLAKE-NEXT: vphaddsw %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
; SKYLAKE-NEXT: vphaddsw (%rdi), %ymm0, %ymm0 # sched: [3:2.00]
; SKYLAKE-NEXT: retq # sched: [2:1.00]
;
; ZNVER1-LABEL: test_phaddsw:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vphaddsw %ymm1, %ymm0, %ymm0 # sched: [100:?]
; ZNVER1-NEXT: vphaddsw (%rdi), %ymm0, %ymm0 # sched: [100:?]
; ZNVER1-NEXT: retq # sched: [1:0.50]
 %1 = call <16 x i16> @llvm.x86.avx2.phadd.sw(<16 x i16> %a0, <16 x i16> %a1)
 %2 = load <16 x i16>, <16 x i16> *%a2, align 32
 %3 = call <16 x i16> @llvm.x86.avx2.phadd.sw(<16 x i16> %1, <16 x i16> %2)
 ret <16 x i16> %3
}
declare <16 x i16> @llvm.x86.avx2.phadd.sw(<16 x i16>, <16 x i16>) nounwind readnone
2237
; Scheduling-info test for VPHADDW, reg-reg and reg-mem forms.
define <16 x i16> @test_phaddw(<16 x i16> %a0, <16 x i16> %a1, <16 x i16> *%a2) {
; GENERIC-LABEL: test_phaddw:
; GENERIC: # BB#0:
; GENERIC-NEXT: vphaddw %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; GENERIC-NEXT: vphaddw (%rdi), %ymm0, %ymm0 # sched: [5:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_phaddw:
; HASWELL: # BB#0:
; HASWELL-NEXT: vphaddw %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
; HASWELL-NEXT: vphaddw (%rdi), %ymm0, %ymm0 # sched: [3:2.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; SKYLAKE-LABEL: test_phaddw:
; SKYLAKE: # BB#0:
; SKYLAKE-NEXT: vphaddw %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
; SKYLAKE-NEXT: vphaddw (%rdi), %ymm0, %ymm0 # sched: [3:2.00]
; SKYLAKE-NEXT: retq # sched: [2:1.00]
;
; ZNVER1-LABEL: test_phaddw:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vphaddw %ymm1, %ymm0, %ymm0 # sched: [100:?]
; ZNVER1-NEXT: vphaddw (%rdi), %ymm0, %ymm0 # sched: [100:?]
; ZNVER1-NEXT: retq # sched: [1:0.50]
 %1 = call <16 x i16> @llvm.x86.avx2.phadd.w(<16 x i16> %a0, <16 x i16> %a1)
 %2 = load <16 x i16>, <16 x i16> *%a2, align 32
 %3 = call <16 x i16> @llvm.x86.avx2.phadd.w(<16 x i16> %1, <16 x i16> %2)
 ret <16 x i16> %3
}
declare <16 x i16> @llvm.x86.avx2.phadd.w(<16 x i16>, <16 x i16>) nounwind readnone
2268
; Scheduling-info test for VPHSUBD, reg-reg and reg-mem forms.
define <8 x i32> @test_phsubd(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> *%a2) {
; GENERIC-LABEL: test_phsubd:
; GENERIC: # BB#0:
; GENERIC-NEXT: vphsubd %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; GENERIC-NEXT: vphsubd (%rdi), %ymm0, %ymm0 # sched: [5:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_phsubd:
; HASWELL: # BB#0:
; HASWELL-NEXT: vphsubd %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
; HASWELL-NEXT: vphsubd (%rdi), %ymm0, %ymm0 # sched: [3:2.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; SKYLAKE-LABEL: test_phsubd:
; SKYLAKE: # BB#0:
; SKYLAKE-NEXT: vphsubd %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
; SKYLAKE-NEXT: vphsubd (%rdi), %ymm0, %ymm0 # sched: [3:2.00]
; SKYLAKE-NEXT: retq # sched: [2:1.00]
;
; ZNVER1-LABEL: test_phsubd:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vphsubd %ymm1, %ymm0, %ymm0 # sched: [100:?]
; ZNVER1-NEXT: vphsubd (%rdi), %ymm0, %ymm0 # sched: [100:?]
; ZNVER1-NEXT: retq # sched: [1:0.50]
 %1 = call <8 x i32> @llvm.x86.avx2.phsub.d(<8 x i32> %a0, <8 x i32> %a1)
 %2 = load <8 x i32>, <8 x i32> *%a2, align 32
 %3 = call <8 x i32> @llvm.x86.avx2.phsub.d(<8 x i32> %1, <8 x i32> %2)
 ret <8 x i32> %3
}
declare <8 x i32> @llvm.x86.avx2.phsub.d(<8 x i32>, <8 x i32>) nounwind readnone
2299
; Scheduling-info test for VPHSUBSW, reg-reg and reg-mem forms.
define <16 x i16> @test_phsubsw(<16 x i16> %a0, <16 x i16> %a1, <16 x i16> *%a2) {
; GENERIC-LABEL: test_phsubsw:
; GENERIC: # BB#0:
; GENERIC-NEXT: vphsubsw %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: vphsubsw (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_phsubsw:
; HASWELL: # BB#0:
; HASWELL-NEXT: vphsubsw %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
; HASWELL-NEXT: vphsubsw (%rdi), %ymm0, %ymm0 # sched: [3:2.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; SKYLAKE-LABEL: test_phsubsw:
; SKYLAKE: # BB#0:
; SKYLAKE-NEXT: vphsubsw %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
; SKYLAKE-NEXT: vphsubsw (%rdi), %ymm0, %ymm0 # sched: [3:2.00]
; SKYLAKE-NEXT: retq # sched: [2:1.00]
;
; ZNVER1-LABEL: test_phsubsw:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vphsubsw %ymm1, %ymm0, %ymm0 # sched: [100:?]
; ZNVER1-NEXT: vphsubsw (%rdi), %ymm0, %ymm0 # sched: [100:?]
; ZNVER1-NEXT: retq # sched: [1:0.50]
 %1 = call <16 x i16> @llvm.x86.avx2.phsub.sw(<16 x i16> %a0, <16 x i16> %a1)
 %2 = load <16 x i16>, <16 x i16> *%a2, align 32
 %3 = call <16 x i16> @llvm.x86.avx2.phsub.sw(<16 x i16> %1, <16 x i16> %2)
 ret <16 x i16> %3
}
declare <16 x i16> @llvm.x86.avx2.phsub.sw(<16 x i16>, <16 x i16>) nounwind readnone
2330
; Scheduling-info test for VPHSUBW, reg-reg and reg-mem forms.
define <16 x i16> @test_phsubw(<16 x i16> %a0, <16 x i16> %a1, <16 x i16> *%a2) {
; GENERIC-LABEL: test_phsubw:
; GENERIC: # BB#0:
; GENERIC-NEXT: vphsubw %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; GENERIC-NEXT: vphsubw (%rdi), %ymm0, %ymm0 # sched: [5:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_phsubw:
; HASWELL: # BB#0:
; HASWELL-NEXT: vphsubw %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
; HASWELL-NEXT: vphsubw (%rdi), %ymm0, %ymm0 # sched: [3:2.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; SKYLAKE-LABEL: test_phsubw:
; SKYLAKE: # BB#0:
; SKYLAKE-NEXT: vphsubw %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
; SKYLAKE-NEXT: vphsubw (%rdi), %ymm0, %ymm0 # sched: [3:2.00]
; SKYLAKE-NEXT: retq # sched: [2:1.00]
;
; ZNVER1-LABEL: test_phsubw:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vphsubw %ymm1, %ymm0, %ymm0 # sched: [100:?]
; ZNVER1-NEXT: vphsubw (%rdi), %ymm0, %ymm0 # sched: [100:?]
; ZNVER1-NEXT: retq # sched: [1:0.50]
 %1 = call <16 x i16> @llvm.x86.avx2.phsub.w(<16 x i16> %a0, <16 x i16> %a1)
 %2 = load <16 x i16>, <16 x i16> *%a2, align 32
 %3 = call <16 x i16> @llvm.x86.avx2.phsub.w(<16 x i16> %1, <16 x i16> %2)
 ret <16 x i16> %3
}
declare <16 x i16> @llvm.x86.avx2.phsub.w(<16 x i16>, <16 x i16>) nounwind readnone
2361
; Scheduling-info test for VPMADDUBSW, reg-reg and reg-mem forms (bitcast feeds
; the i16 result back in as the i8 operand of the second madd).
define <16 x i16> @test_pmaddubsw(<32 x i8> %a0, <32 x i8> %a1, <32 x i8> *%a2) {
; GENERIC-LABEL: test_pmaddubsw:
; GENERIC: # BB#0:
; GENERIC-NEXT: vpmaddubsw %ymm1, %ymm0, %ymm0 # sched: [5:1.00]
; GENERIC-NEXT: vpmaddubsw (%rdi), %ymm0, %ymm0 # sched: [9:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pmaddubsw:
; HASWELL: # BB#0:
; HASWELL-NEXT: vpmaddubsw %ymm1, %ymm0, %ymm0 # sched: [5:1.00]
; HASWELL-NEXT: vpmaddubsw (%rdi), %ymm0, %ymm0 # sched: [5:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; SKYLAKE-LABEL: test_pmaddubsw:
; SKYLAKE: # BB#0:
; SKYLAKE-NEXT: vpmaddubsw %ymm1, %ymm0, %ymm0 # sched: [5:1.00]
; SKYLAKE-NEXT: vpmaddubsw (%rdi), %ymm0, %ymm0 # sched: [5:1.00]
; SKYLAKE-NEXT: retq # sched: [2:1.00]
;
; ZNVER1-LABEL: test_pmaddubsw:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vpmaddubsw %ymm1, %ymm0, %ymm0 # sched: [4:1.00]
; ZNVER1-NEXT: vpmaddubsw (%rdi), %ymm0, %ymm0 # sched: [11:1.00]
; ZNVER1-NEXT: retq # sched: [1:0.50]
 %1 = call <16 x i16> @llvm.x86.avx2.pmadd.ub.sw(<32 x i8> %a0, <32 x i8> %a1)
 %2 = bitcast <16 x i16> %1 to <32 x i8>
 %3 = load <32 x i8>, <32 x i8> *%a2, align 32
 %4 = call <16 x i16> @llvm.x86.avx2.pmadd.ub.sw(<32 x i8> %2, <32 x i8> %3)
 ret <16 x i16> %4
}
declare <16 x i16> @llvm.x86.avx2.pmadd.ub.sw(<32 x i8>, <32 x i8>) nounwind readnone
2393
; Scheduling-info test for VPMADDWD, reg-reg and reg-mem forms.
define <8 x i32> @test_pmaddwd(<16 x i16> %a0, <16 x i16> %a1, <16 x i16> *%a2) {
; GENERIC-LABEL: test_pmaddwd:
; GENERIC: # BB#0:
; GENERIC-NEXT: vpmaddwd %ymm1, %ymm0, %ymm0 # sched: [5:1.00]
; GENERIC-NEXT: vpmaddwd (%rdi), %ymm0, %ymm0 # sched: [9:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pmaddwd:
; HASWELL: # BB#0:
; HASWELL-NEXT: vpmaddwd %ymm1, %ymm0, %ymm0 # sched: [5:1.00]
; HASWELL-NEXT: vpmaddwd (%rdi), %ymm0, %ymm0 # sched: [5:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; SKYLAKE-LABEL: test_pmaddwd:
; SKYLAKE: # BB#0:
; SKYLAKE-NEXT: vpmaddwd %ymm1, %ymm0, %ymm0 # sched: [5:1.00]
; SKYLAKE-NEXT: vpmaddwd (%rdi), %ymm0, %ymm0 # sched: [5:1.00]
; SKYLAKE-NEXT: retq # sched: [2:1.00]
;
; ZNVER1-LABEL: test_pmaddwd:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vpmaddwd %ymm1, %ymm0, %ymm0 # sched: [4:1.00]
; ZNVER1-NEXT: vpmaddwd (%rdi), %ymm0, %ymm0 # sched: [11:1.00]
; ZNVER1-NEXT: retq # sched: [1:0.50]
 %1 = call <8 x i32> @llvm.x86.avx2.pmadd.wd(<16 x i16> %a0, <16 x i16> %a1)
 %2 = bitcast <8 x i32> %1 to <16 x i16>
 %3 = load <16 x i16>, <16 x i16> *%a2, align 32
 %4 = call <8 x i32> @llvm.x86.avx2.pmadd.wd(<16 x i16> %2, <16 x i16> %3)
 ret <8 x i32> %4
}
declare <8 x i32> @llvm.x86.avx2.pmadd.wd(<16 x i16>, <16 x i16>) nounwind readnone
2425
; Scheduling-info test for VPMASKMOVD (xmm): masked load and masked store through
; the same pointer, plus the vmovdqa copy of the loaded value.
define <4 x i32> @test_pmaskmovd(i8* %a0, <4 x i32> %a1, <4 x i32> %a2) {
; GENERIC-LABEL: test_pmaskmovd:
; GENERIC: # BB#0:
; GENERIC-NEXT: vpmaskmovd (%rdi), %xmm0, %xmm2
; GENERIC-NEXT: vpmaskmovd %xmm1, %xmm0, (%rdi)
; GENERIC-NEXT: vmovdqa %xmm2, %xmm0 # sched: [1:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pmaskmovd:
; HASWELL: # BB#0:
; HASWELL-NEXT: vpmaskmovd (%rdi), %xmm0, %xmm2 # sched: [2:2.00]
; HASWELL-NEXT: vpmaskmovd %xmm1, %xmm0, (%rdi) # sched: [4:1.00]
; HASWELL-NEXT: vmovdqa %xmm2, %xmm0 # sched: [1:0.25]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; SKYLAKE-LABEL: test_pmaskmovd:
; SKYLAKE: # BB#0:
; SKYLAKE-NEXT: vpmaskmovd (%rdi), %xmm0, %xmm2 # sched: [2:2.00]
; SKYLAKE-NEXT: vpmaskmovd %xmm1, %xmm0, (%rdi) # sched: [4:1.00]
; SKYLAKE-NEXT: vmovdqa %xmm2, %xmm0 # sched: [1:0.25]
; SKYLAKE-NEXT: retq # sched: [2:1.00]
;
; ZNVER1-LABEL: test_pmaskmovd:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vpmaskmovd (%rdi), %xmm0, %xmm2 # sched: [100:?]
; ZNVER1-NEXT: vpmaskmovd %xmm1, %xmm0, (%rdi) # sched: [100:?]
; ZNVER1-NEXT: vmovdqa %xmm2, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT: retq # sched: [1:0.50]
 %1 = call <4 x i32> @llvm.x86.avx2.maskload.d(i8* %a0, <4 x i32> %a1)
 call void @llvm.x86.avx2.maskstore.d(i8* %a0, <4 x i32> %a1, <4 x i32> %a2)
 ret <4 x i32> %1
}
declare <4 x i32> @llvm.x86.avx2.maskload.d(i8*, <4 x i32>) nounwind readonly
declare void @llvm.x86.avx2.maskstore.d(i8*, <4 x i32>, <4 x i32>) nounwind
2460
; Scheduling-info test for VPMASKMOVD (ymm): masked load + masked store + vmovdqa.
define <8 x i32> @test_pmaskmovd_ymm(i8* %a0, <8 x i32> %a1, <8 x i32> %a2) {
; GENERIC-LABEL: test_pmaskmovd_ymm:
; GENERIC: # BB#0:
; GENERIC-NEXT: vpmaskmovd (%rdi), %ymm0, %ymm2
; GENERIC-NEXT: vpmaskmovd %ymm1, %ymm0, (%rdi)
; GENERIC-NEXT: vmovdqa %ymm2, %ymm0 # sched: [1:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pmaskmovd_ymm:
; HASWELL: # BB#0:
; HASWELL-NEXT: vpmaskmovd (%rdi), %ymm0, %ymm2 # sched: [2:2.00]
; HASWELL-NEXT: vpmaskmovd %ymm1, %ymm0, (%rdi) # sched: [4:1.00]
; HASWELL-NEXT: vmovdqa %ymm2, %ymm0 # sched: [1:0.25]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; SKYLAKE-LABEL: test_pmaskmovd_ymm:
; SKYLAKE: # BB#0:
; SKYLAKE-NEXT: vpmaskmovd (%rdi), %ymm0, %ymm2 # sched: [2:2.00]
; SKYLAKE-NEXT: vpmaskmovd %ymm1, %ymm0, (%rdi) # sched: [4:1.00]
; SKYLAKE-NEXT: vmovdqa %ymm2, %ymm0 # sched: [1:0.25]
; SKYLAKE-NEXT: retq # sched: [2:1.00]
;
; ZNVER1-LABEL: test_pmaskmovd_ymm:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vpmaskmovd (%rdi), %ymm0, %ymm2 # sched: [100:?]
; ZNVER1-NEXT: vpmaskmovd %ymm1, %ymm0, (%rdi) # sched: [100:?]
; ZNVER1-NEXT: vmovdqa %ymm2, %ymm0 # sched: [2:0.25]
; ZNVER1-NEXT: retq # sched: [1:0.50]
 %1 = call <8 x i32> @llvm.x86.avx2.maskload.d.256(i8* %a0, <8 x i32> %a1)
 call void @llvm.x86.avx2.maskstore.d.256(i8* %a0, <8 x i32> %a1, <8 x i32> %a2)
 ret <8 x i32> %1
}
declare <8 x i32> @llvm.x86.avx2.maskload.d.256(i8*, <8 x i32>) nounwind readonly
declare void @llvm.x86.avx2.maskstore.d.256(i8*, <8 x i32>, <8 x i32>) nounwind
2495
; Scheduling-info test for VPMASKMOVQ (xmm): masked load + masked store + vmovdqa.
define <2 x i64> @test_pmaskmovq(i8* %a0, <2 x i64> %a1, <2 x i64> %a2) {
; GENERIC-LABEL: test_pmaskmovq:
; GENERIC: # BB#0:
; GENERIC-NEXT: vpmaskmovq (%rdi), %xmm0, %xmm2
; GENERIC-NEXT: vpmaskmovq %xmm1, %xmm0, (%rdi)
; GENERIC-NEXT: vmovdqa %xmm2, %xmm0 # sched: [1:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pmaskmovq:
; HASWELL: # BB#0:
; HASWELL-NEXT: vpmaskmovq (%rdi), %xmm0, %xmm2 # sched: [2:2.00]
; HASWELL-NEXT: vpmaskmovq %xmm1, %xmm0, (%rdi) # sched: [4:1.00]
; HASWELL-NEXT: vmovdqa %xmm2, %xmm0 # sched: [1:0.25]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; SKYLAKE-LABEL: test_pmaskmovq:
; SKYLAKE: # BB#0:
; SKYLAKE-NEXT: vpmaskmovq (%rdi), %xmm0, %xmm2 # sched: [2:2.00]
; SKYLAKE-NEXT: vpmaskmovq %xmm1, %xmm0, (%rdi) # sched: [4:1.00]
; SKYLAKE-NEXT: vmovdqa %xmm2, %xmm0 # sched: [1:0.25]
; SKYLAKE-NEXT: retq # sched: [2:1.00]
;
; ZNVER1-LABEL: test_pmaskmovq:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vpmaskmovq (%rdi), %xmm0, %xmm2 # sched: [8:1.00]
; ZNVER1-NEXT: vpmaskmovq %xmm1, %xmm0, (%rdi) # sched: [100:?]
; ZNVER1-NEXT: vmovdqa %xmm2, %xmm0 # sched: [1:0.25]
; ZNVER1-NEXT: retq # sched: [1:0.50]
 %1 = call <2 x i64> @llvm.x86.avx2.maskload.q(i8* %a0, <2 x i64> %a1)
 call void @llvm.x86.avx2.maskstore.q(i8* %a0, <2 x i64> %a1, <2 x i64> %a2)
 ret <2 x i64> %1
}
declare <2 x i64> @llvm.x86.avx2.maskload.q(i8*, <2 x i64>) nounwind readonly
declare void @llvm.x86.avx2.maskstore.q(i8*, <2 x i64>, <2 x i64>) nounwind
2530
; Scheduling-info test for VPMASKMOVQ (ymm): masked load + masked store + vmovdqa.
define <4 x i64> @test_pmaskmovq_ymm(i8* %a0, <4 x i64> %a1, <4 x i64> %a2) {
; GENERIC-LABEL: test_pmaskmovq_ymm:
; GENERIC: # BB#0:
; GENERIC-NEXT: vpmaskmovq (%rdi), %ymm0, %ymm2
; GENERIC-NEXT: vpmaskmovq %ymm1, %ymm0, (%rdi)
; GENERIC-NEXT: vmovdqa %ymm2, %ymm0 # sched: [1:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pmaskmovq_ymm:
; HASWELL: # BB#0:
; HASWELL-NEXT: vpmaskmovq (%rdi), %ymm0, %ymm2 # sched: [2:2.00]
; HASWELL-NEXT: vpmaskmovq %ymm1, %ymm0, (%rdi) # sched: [4:1.00]
; HASWELL-NEXT: vmovdqa %ymm2, %ymm0 # sched: [1:0.25]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; SKYLAKE-LABEL: test_pmaskmovq_ymm:
; SKYLAKE: # BB#0:
; SKYLAKE-NEXT: vpmaskmovq (%rdi), %ymm0, %ymm2 # sched: [2:2.00]
; SKYLAKE-NEXT: vpmaskmovq %ymm1, %ymm0, (%rdi) # sched: [4:1.00]
; SKYLAKE-NEXT: vmovdqa %ymm2, %ymm0 # sched: [1:0.25]
; SKYLAKE-NEXT: retq # sched: [2:1.00]
;
; ZNVER1-LABEL: test_pmaskmovq_ymm:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vpmaskmovq (%rdi), %ymm0, %ymm2 # sched: [9:1.50]
; ZNVER1-NEXT: vpmaskmovq %ymm1, %ymm0, (%rdi) # sched: [100:?]
; ZNVER1-NEXT: vmovdqa %ymm2, %ymm0 # sched: [2:0.25]
; ZNVER1-NEXT: retq # sched: [1:0.50]
 %1 = call <4 x i64> @llvm.x86.avx2.maskload.q.256(i8* %a0, <4 x i64> %a1)
 call void @llvm.x86.avx2.maskstore.q.256(i8* %a0, <4 x i64> %a1, <4 x i64> %a2)
 ret <4 x i64> %1
}
declare <4 x i64> @llvm.x86.avx2.maskload.q.256(i8*, <4 x i64>) nounwind readonly
declare void @llvm.x86.avx2.maskstore.q.256(i8*, <4 x i64>, <4 x i64>) nounwind
2565
; Scheduling-info test for VPMAXSB, reg-reg and reg-mem forms.
define <32 x i8> @test_pmaxsb(<32 x i8> %a0, <32 x i8> %a1, <32 x i8> *%a2) {
; GENERIC-LABEL: test_pmaxsb:
; GENERIC: # BB#0:
; GENERIC-NEXT: vpmaxsb %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: vpmaxsb (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pmaxsb:
; HASWELL: # BB#0:
; HASWELL-NEXT: vpmaxsb %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: vpmaxsb (%rdi), %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; SKYLAKE-LABEL: test_pmaxsb:
; SKYLAKE: # BB#0:
; SKYLAKE-NEXT: vpmaxsb %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; SKYLAKE-NEXT: vpmaxsb (%rdi), %ymm0, %ymm0 # sched: [1:0.50]
; SKYLAKE-NEXT: retq # sched: [2:1.00]
;
; ZNVER1-LABEL: test_pmaxsb:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vpmaxsb %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpmaxsb (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
 %1 = call <32 x i8> @llvm.x86.avx2.pmaxs.b(<32 x i8> %a0, <32 x i8> %a1)
 %2 = load <32 x i8>, <32 x i8> *%a2, align 32
 %3 = call <32 x i8> @llvm.x86.avx2.pmaxs.b(<32 x i8> %1, <32 x i8> %2)
 ret <32 x i8> %3
}
declare <32 x i8> @llvm.x86.avx2.pmaxs.b(<32 x i8>, <32 x i8>) nounwind readnone
2596
; Scheduling-info test for VPMAXSD, reg-reg and reg-mem forms.
define <8 x i32> @test_pmaxsd(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> *%a2) {
; GENERIC-LABEL: test_pmaxsd:
; GENERIC: # BB#0:
; GENERIC-NEXT: vpmaxsd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: vpmaxsd (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pmaxsd:
; HASWELL: # BB#0:
; HASWELL-NEXT: vpmaxsd %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: vpmaxsd (%rdi), %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; SKYLAKE-LABEL: test_pmaxsd:
; SKYLAKE: # BB#0:
; SKYLAKE-NEXT: vpmaxsd %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; SKYLAKE-NEXT: vpmaxsd (%rdi), %ymm0, %ymm0 # sched: [1:0.50]
; SKYLAKE-NEXT: retq # sched: [2:1.00]
;
; ZNVER1-LABEL: test_pmaxsd:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vpmaxsd %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpmaxsd (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
 %1 = call <8 x i32> @llvm.x86.avx2.pmaxs.d(<8 x i32> %a0, <8 x i32> %a1)
 %2 = load <8 x i32>, <8 x i32> *%a2, align 32
 %3 = call <8 x i32> @llvm.x86.avx2.pmaxs.d(<8 x i32> %1, <8 x i32> %2)
 ret <8 x i32> %3
}
declare <8 x i32> @llvm.x86.avx2.pmaxs.d(<8 x i32>, <8 x i32>) nounwind readnone
2627
; Scheduling-info test for VPMAXSW, reg-reg and reg-mem forms.
define <16 x i16> @test_pmaxsw(<16 x i16> %a0, <16 x i16> %a1, <16 x i16> *%a2) {
; GENERIC-LABEL: test_pmaxsw:
; GENERIC: # BB#0:
; GENERIC-NEXT: vpmaxsw %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: vpmaxsw (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pmaxsw:
; HASWELL: # BB#0:
; HASWELL-NEXT: vpmaxsw %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: vpmaxsw (%rdi), %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; SKYLAKE-LABEL: test_pmaxsw:
; SKYLAKE: # BB#0:
; SKYLAKE-NEXT: vpmaxsw %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; SKYLAKE-NEXT: vpmaxsw (%rdi), %ymm0, %ymm0 # sched: [1:0.50]
; SKYLAKE-NEXT: retq # sched: [2:1.00]
;
; ZNVER1-LABEL: test_pmaxsw:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vpmaxsw %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpmaxsw (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
 %1 = call <16 x i16> @llvm.x86.avx2.pmaxs.w(<16 x i16> %a0, <16 x i16> %a1)
 %2 = load <16 x i16>, <16 x i16> *%a2, align 32
 %3 = call <16 x i16> @llvm.x86.avx2.pmaxs.w(<16 x i16> %1, <16 x i16> %2)
 ret <16 x i16> %3
}
declare <16 x i16> @llvm.x86.avx2.pmaxs.w(<16 x i16>, <16 x i16>) nounwind readnone
2658
; Scheduling-info test for VPMAXUB, reg-reg and reg-mem forms.
define <32 x i8> @test_pmaxub(<32 x i8> %a0, <32 x i8> %a1, <32 x i8> *%a2) {
; GENERIC-LABEL: test_pmaxub:
; GENERIC: # BB#0:
; GENERIC-NEXT: vpmaxub %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: vpmaxub (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pmaxub:
; HASWELL: # BB#0:
; HASWELL-NEXT: vpmaxub %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: vpmaxub (%rdi), %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; SKYLAKE-LABEL: test_pmaxub:
; SKYLAKE: # BB#0:
; SKYLAKE-NEXT: vpmaxub %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; SKYLAKE-NEXT: vpmaxub (%rdi), %ymm0, %ymm0 # sched: [1:0.50]
; SKYLAKE-NEXT: retq # sched: [2:1.00]
;
; ZNVER1-LABEL: test_pmaxub:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vpmaxub %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpmaxub (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
 %1 = call <32 x i8> @llvm.x86.avx2.pmaxu.b(<32 x i8> %a0, <32 x i8> %a1)
 %2 = load <32 x i8>, <32 x i8> *%a2, align 32
 %3 = call <32 x i8> @llvm.x86.avx2.pmaxu.b(<32 x i8> %1, <32 x i8> %2)
 ret <32 x i8> %3
}
declare <32 x i8> @llvm.x86.avx2.pmaxu.b(<32 x i8>, <32 x i8>) nounwind readnone
2689
; Scheduling-info test for VPMAXUD, reg-reg and reg-mem forms.
define <8 x i32> @test_pmaxud(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> *%a2) {
; GENERIC-LABEL: test_pmaxud:
; GENERIC: # BB#0:
; GENERIC-NEXT: vpmaxud %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: vpmaxud (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pmaxud:
; HASWELL: # BB#0:
; HASWELL-NEXT: vpmaxud %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: vpmaxud (%rdi), %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; SKYLAKE-LABEL: test_pmaxud:
; SKYLAKE: # BB#0:
; SKYLAKE-NEXT: vpmaxud %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; SKYLAKE-NEXT: vpmaxud (%rdi), %ymm0, %ymm0 # sched: [1:0.50]
; SKYLAKE-NEXT: retq # sched: [2:1.00]
;
; ZNVER1-LABEL: test_pmaxud:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vpmaxud %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpmaxud (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
 %1 = call <8 x i32> @llvm.x86.avx2.pmaxu.d(<8 x i32> %a0, <8 x i32> %a1)
 %2 = load <8 x i32>, <8 x i32> *%a2, align 32
 %3 = call <8 x i32> @llvm.x86.avx2.pmaxu.d(<8 x i32> %1, <8 x i32> %2)
 ret <8 x i32> %3
}
declare <8 x i32> @llvm.x86.avx2.pmaxu.d(<8 x i32>, <8 x i32>) nounwind readnone
2720
; Scheduling-info test for VPMAXUW, reg-reg and reg-mem forms.
define <16 x i16> @test_pmaxuw(<16 x i16> %a0, <16 x i16> %a1, <16 x i16> *%a2) {
; GENERIC-LABEL: test_pmaxuw:
; GENERIC: # BB#0:
; GENERIC-NEXT: vpmaxuw %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: vpmaxuw (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pmaxuw:
; HASWELL: # BB#0:
; HASWELL-NEXT: vpmaxuw %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: vpmaxuw (%rdi), %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; SKYLAKE-LABEL: test_pmaxuw:
; SKYLAKE: # BB#0:
; SKYLAKE-NEXT: vpmaxuw %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; SKYLAKE-NEXT: vpmaxuw (%rdi), %ymm0, %ymm0 # sched: [1:0.50]
; SKYLAKE-NEXT: retq # sched: [2:1.00]
;
; ZNVER1-LABEL: test_pmaxuw:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vpmaxuw %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpmaxuw (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
 %1 = call <16 x i16> @llvm.x86.avx2.pmaxu.w(<16 x i16> %a0, <16 x i16> %a1)
 %2 = load <16 x i16>, <16 x i16> *%a2, align 32
 %3 = call <16 x i16> @llvm.x86.avx2.pmaxu.w(<16 x i16> %1, <16 x i16> %2)
 ret <16 x i16> %3
}
declare <16 x i16> @llvm.x86.avx2.pmaxu.w(<16 x i16>, <16 x i16>) nounwind readnone
2751
; NOTE(review): autogenerated sched-model test -- pins [latency:throughput] for reg-reg and reg-mem vpminsb per -mcpu; regenerate with update_llc_test_checks.py rather than hand-editing.
2752define <32 x i8> @test_pminsb(<32 x i8> %a0, <32 x i8> %a1, <32 x i8> *%a2) {
2753; GENERIC-LABEL: test_pminsb:
2754; GENERIC: # BB#0:
2755; GENERIC-NEXT: vpminsb %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
2756; GENERIC-NEXT: vpminsb (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
2757; GENERIC-NEXT: retq # sched: [1:1.00]
2758;
2759; HASWELL-LABEL: test_pminsb:
2760; HASWELL: # BB#0:
2761; HASWELL-NEXT: vpminsb %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
2762; HASWELL-NEXT: vpminsb (%rdi), %ymm0, %ymm0 # sched: [1:0.50]
2763; HASWELL-NEXT: retq # sched: [2:1.00]
2764;
2765; SKYLAKE-LABEL: test_pminsb:
2766; SKYLAKE: # BB#0:
2767; SKYLAKE-NEXT: vpminsb %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
2768; SKYLAKE-NEXT: vpminsb (%rdi), %ymm0, %ymm0 # sched: [1:0.50]
2769; SKYLAKE-NEXT: retq # sched: [2:1.00]
2770;
2771; ZNVER1-LABEL: test_pminsb:
2772; ZNVER1: # BB#0:
2773; ZNVER1-NEXT: vpminsb %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
2774; ZNVER1-NEXT: vpminsb (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
2775; ZNVER1-NEXT: retq # sched: [1:0.50]
2776 %1 = call <32 x i8> @llvm.x86.avx2.pmins.b(<32 x i8> %a0, <32 x i8> %a1)
2777 %2 = load <32 x i8>, <32 x i8> *%a2, align 32
2778 %3 = call <32 x i8> @llvm.x86.avx2.pmins.b(<32 x i8> %1, <32 x i8> %2)
2779 ret <32 x i8> %3
2780}
2781declare <32 x i8> @llvm.x86.avx2.pmins.b(<32 x i8>, <32 x i8>) nounwind readnone
2782
; NOTE(review): autogenerated sched-model test -- pins [latency:throughput] for reg-reg and reg-mem vpminsd per -mcpu; regenerate with update_llc_test_checks.py rather than hand-editing.
2783define <8 x i32> @test_pminsd(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> *%a2) {
2784; GENERIC-LABEL: test_pminsd:
2785; GENERIC: # BB#0:
2786; GENERIC-NEXT: vpminsd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
2787; GENERIC-NEXT: vpminsd (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
2788; GENERIC-NEXT: retq # sched: [1:1.00]
2789;
2790; HASWELL-LABEL: test_pminsd:
2791; HASWELL: # BB#0:
2792; HASWELL-NEXT: vpminsd %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
2793; HASWELL-NEXT: vpminsd (%rdi), %ymm0, %ymm0 # sched: [1:0.50]
2794; HASWELL-NEXT: retq # sched: [2:1.00]
2795;
2796; SKYLAKE-LABEL: test_pminsd:
2797; SKYLAKE: # BB#0:
2798; SKYLAKE-NEXT: vpminsd %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
2799; SKYLAKE-NEXT: vpminsd (%rdi), %ymm0, %ymm0 # sched: [1:0.50]
2800; SKYLAKE-NEXT: retq # sched: [2:1.00]
2801;
2802; ZNVER1-LABEL: test_pminsd:
2803; ZNVER1: # BB#0:
2804; ZNVER1-NEXT: vpminsd %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
2805; ZNVER1-NEXT: vpminsd (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
2806; ZNVER1-NEXT: retq # sched: [1:0.50]
2807 %1 = call <8 x i32> @llvm.x86.avx2.pmins.d(<8 x i32> %a0, <8 x i32> %a1)
2808 %2 = load <8 x i32>, <8 x i32> *%a2, align 32
2809 %3 = call <8 x i32> @llvm.x86.avx2.pmins.d(<8 x i32> %1, <8 x i32> %2)
2810 ret <8 x i32> %3
2811}
2812declare <8 x i32> @llvm.x86.avx2.pmins.d(<8 x i32>, <8 x i32>) nounwind readnone
2813
; NOTE(review): autogenerated sched-model test -- pins [latency:throughput] for reg-reg and reg-mem vpminsw per -mcpu; regenerate with update_llc_test_checks.py rather than hand-editing.
2814define <16 x i16> @test_pminsw(<16 x i16> %a0, <16 x i16> %a1, <16 x i16> *%a2) {
2815; GENERIC-LABEL: test_pminsw:
2816; GENERIC: # BB#0:
2817; GENERIC-NEXT: vpminsw %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
2818; GENERIC-NEXT: vpminsw (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
2819; GENERIC-NEXT: retq # sched: [1:1.00]
2820;
2821; HASWELL-LABEL: test_pminsw:
2822; HASWELL: # BB#0:
2823; HASWELL-NEXT: vpminsw %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
2824; HASWELL-NEXT: vpminsw (%rdi), %ymm0, %ymm0 # sched: [1:0.50]
2825; HASWELL-NEXT: retq # sched: [2:1.00]
2826;
2827; SKYLAKE-LABEL: test_pminsw:
2828; SKYLAKE: # BB#0:
2829; SKYLAKE-NEXT: vpminsw %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
2830; SKYLAKE-NEXT: vpminsw (%rdi), %ymm0, %ymm0 # sched: [1:0.50]
2831; SKYLAKE-NEXT: retq # sched: [2:1.00]
2832;
2833; ZNVER1-LABEL: test_pminsw:
2834; ZNVER1: # BB#0:
2835; ZNVER1-NEXT: vpminsw %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
2836; ZNVER1-NEXT: vpminsw (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
2837; ZNVER1-NEXT: retq # sched: [1:0.50]
2838 %1 = call <16 x i16> @llvm.x86.avx2.pmins.w(<16 x i16> %a0, <16 x i16> %a1)
2839 %2 = load <16 x i16>, <16 x i16> *%a2, align 32
2840 %3 = call <16 x i16> @llvm.x86.avx2.pmins.w(<16 x i16> %1, <16 x i16> %2)
2841 ret <16 x i16> %3
2842}
2843declare <16 x i16> @llvm.x86.avx2.pmins.w(<16 x i16>, <16 x i16>) nounwind readnone
2844
; NOTE(review): autogenerated sched-model test -- pins [latency:throughput] for reg-reg and reg-mem vpminub per -mcpu; regenerate with update_llc_test_checks.py rather than hand-editing.
2845define <32 x i8> @test_pminub(<32 x i8> %a0, <32 x i8> %a1, <32 x i8> *%a2) {
2846; GENERIC-LABEL: test_pminub:
2847; GENERIC: # BB#0:
2848; GENERIC-NEXT: vpminub %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
2849; GENERIC-NEXT: vpminub (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
2850; GENERIC-NEXT: retq # sched: [1:1.00]
2851;
2852; HASWELL-LABEL: test_pminub:
2853; HASWELL: # BB#0:
2854; HASWELL-NEXT: vpminub %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
2855; HASWELL-NEXT: vpminub (%rdi), %ymm0, %ymm0 # sched: [1:0.50]
2856; HASWELL-NEXT: retq # sched: [2:1.00]
2857;
2858; SKYLAKE-LABEL: test_pminub:
2859; SKYLAKE: # BB#0:
2860; SKYLAKE-NEXT: vpminub %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
2861; SKYLAKE-NEXT: vpminub (%rdi), %ymm0, %ymm0 # sched: [1:0.50]
2862; SKYLAKE-NEXT: retq # sched: [2:1.00]
2863;
2864; ZNVER1-LABEL: test_pminub:
2865; ZNVER1: # BB#0:
2866; ZNVER1-NEXT: vpminub %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
2867; ZNVER1-NEXT: vpminub (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
2868; ZNVER1-NEXT: retq # sched: [1:0.50]
2869 %1 = call <32 x i8> @llvm.x86.avx2.pminu.b(<32 x i8> %a0, <32 x i8> %a1)
2870 %2 = load <32 x i8>, <32 x i8> *%a2, align 32
2871 %3 = call <32 x i8> @llvm.x86.avx2.pminu.b(<32 x i8> %1, <32 x i8> %2)
2872 ret <32 x i8> %3
2873}
2874declare <32 x i8> @llvm.x86.avx2.pminu.b(<32 x i8>, <32 x i8>) nounwind readnone
2875
; NOTE(review): autogenerated sched-model test -- pins [latency:throughput] for reg-reg and reg-mem vpminud per -mcpu; regenerate with update_llc_test_checks.py rather than hand-editing.
2876define <8 x i32> @test_pminud(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> *%a2) {
2877; GENERIC-LABEL: test_pminud:
2878; GENERIC: # BB#0:
2879; GENERIC-NEXT: vpminud %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
2880; GENERIC-NEXT: vpminud (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
2881; GENERIC-NEXT: retq # sched: [1:1.00]
2882;
2883; HASWELL-LABEL: test_pminud:
2884; HASWELL: # BB#0:
2885; HASWELL-NEXT: vpminud %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
2886; HASWELL-NEXT: vpminud (%rdi), %ymm0, %ymm0 # sched: [1:0.50]
2887; HASWELL-NEXT: retq # sched: [2:1.00]
2888;
2889; SKYLAKE-LABEL: test_pminud:
2890; SKYLAKE: # BB#0:
2891; SKYLAKE-NEXT: vpminud %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
2892; SKYLAKE-NEXT: vpminud (%rdi), %ymm0, %ymm0 # sched: [1:0.50]
2893; SKYLAKE-NEXT: retq # sched: [2:1.00]
2894;
2895; ZNVER1-LABEL: test_pminud:
2896; ZNVER1: # BB#0:
2897; ZNVER1-NEXT: vpminud %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
2898; ZNVER1-NEXT: vpminud (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
2899; ZNVER1-NEXT: retq # sched: [1:0.50]
2900 %1 = call <8 x i32> @llvm.x86.avx2.pminu.d(<8 x i32> %a0, <8 x i32> %a1)
2901 %2 = load <8 x i32>, <8 x i32> *%a2, align 32
2902 %3 = call <8 x i32> @llvm.x86.avx2.pminu.d(<8 x i32> %1, <8 x i32> %2)
2903 ret <8 x i32> %3
2904}
2905declare <8 x i32> @llvm.x86.avx2.pminu.d(<8 x i32>, <8 x i32>) nounwind readnone
2906
; NOTE(review): autogenerated sched-model test -- pins [latency:throughput] for reg-reg and reg-mem vpminuw per -mcpu; regenerate with update_llc_test_checks.py rather than hand-editing.
2907define <16 x i16> @test_pminuw(<16 x i16> %a0, <16 x i16> %a1, <16 x i16> *%a2) {
2908; GENERIC-LABEL: test_pminuw:
2909; GENERIC: # BB#0:
2910; GENERIC-NEXT: vpminuw %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
2911; GENERIC-NEXT: vpminuw (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
2912; GENERIC-NEXT: retq # sched: [1:1.00]
2913;
2914; HASWELL-LABEL: test_pminuw:
2915; HASWELL: # BB#0:
2916; HASWELL-NEXT: vpminuw %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
2917; HASWELL-NEXT: vpminuw (%rdi), %ymm0, %ymm0 # sched: [1:0.50]
2918; HASWELL-NEXT: retq # sched: [2:1.00]
2919;
2920; SKYLAKE-LABEL: test_pminuw:
2921; SKYLAKE: # BB#0:
2922; SKYLAKE-NEXT: vpminuw %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
2923; SKYLAKE-NEXT: vpminuw (%rdi), %ymm0, %ymm0 # sched: [1:0.50]
2924; SKYLAKE-NEXT: retq # sched: [2:1.00]
2925;
2926; ZNVER1-LABEL: test_pminuw:
2927; ZNVER1: # BB#0:
2928; ZNVER1-NEXT: vpminuw %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
2929; ZNVER1-NEXT: vpminuw (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
2930; ZNVER1-NEXT: retq # sched: [1:0.50]
2931 %1 = call <16 x i16> @llvm.x86.avx2.pminu.w(<16 x i16> %a0, <16 x i16> %a1)
2932 %2 = load <16 x i16>, <16 x i16> *%a2, align 32
2933 %3 = call <16 x i16> @llvm.x86.avx2.pminu.w(<16 x i16> %1, <16 x i16> %2)
2934 ret <16 x i16> %3
2935}
2936declare <16 x i16> @llvm.x86.avx2.pminu.w(<16 x i16>, <16 x i16>) nounwind readnone
2937
; NOTE(review): autogenerated sched-model test -- pins vpmovmskb (ymm -> gpr mask) latency per -mcpu; note the generic run has no sched annotation on vzeroupper. Regenerate with update_llc_test_checks.py rather than hand-editing.
Simon Pilgrim76418aa2017-09-12 15:52:01 +00002938define i32 @test_pmovmskb(<32 x i8> %a0) {
2939; GENERIC-LABEL: test_pmovmskb:
2940; GENERIC: # BB#0:
2941; GENERIC-NEXT: vpmovmskb %ymm0, %eax # sched: [1:1.00]
2942; GENERIC-NEXT: vzeroupper
2943; GENERIC-NEXT: retq # sched: [1:1.00]
2944;
2945; HASWELL-LABEL: test_pmovmskb:
2946; HASWELL: # BB#0:
2947; HASWELL-NEXT: vpmovmskb %ymm0, %eax # sched: [3:1.00]
2948; HASWELL-NEXT: vzeroupper # sched: [4:1.00]
2949; HASWELL-NEXT: retq # sched: [2:1.00]
2950;
2951; SKYLAKE-LABEL: test_pmovmskb:
2952; SKYLAKE: # BB#0:
2953; SKYLAKE-NEXT: vpmovmskb %ymm0, %eax # sched: [3:1.00]
2954; SKYLAKE-NEXT: vzeroupper # sched: [4:1.00]
2955; SKYLAKE-NEXT: retq # sched: [2:1.00]
2956;
2957; ZNVER1-LABEL: test_pmovmskb:
2958; ZNVER1: # BB#0:
2959; ZNVER1-NEXT: vpmovmskb %ymm0, %eax # sched: [2:1.00]
2960; ZNVER1-NEXT: vzeroupper # sched: [100:?]
2961; ZNVER1-NEXT: retq # sched: [1:0.50]
2962 %1 = call i32 @llvm.x86.avx2.pmovmskb(<32 x i8> %a0)
2963 ret i32 %1
2964}
2965declare i32 @llvm.x86.avx2.pmovmskb(<32 x i8>) nounwind readnone
2966
; NOTE(review): autogenerated sched-model test -- pins vpmovsxbd (xmm -> ymm sign-extend) for reg and mem sources plus the dependent vpaddd, per -mcpu; regenerate with update_llc_test_checks.py rather than hand-editing.
Simon Pilgrim0af5a7722017-09-12 15:01:20 +00002967define <8 x i32> @test_pmovsxbd(<16 x i8> %a0, <16 x i8> *%a1) {
2968; GENERIC-LABEL: test_pmovsxbd:
2969; GENERIC: # BB#0:
2970; GENERIC-NEXT: vpmovsxbd %xmm0, %ymm0 # sched: [1:1.00]
2971; GENERIC-NEXT: vpmovsxbd (%rdi), %ymm1 # sched: [5:1.00]
2972; GENERIC-NEXT: vpaddd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
2973; GENERIC-NEXT: retq # sched: [1:1.00]
2974;
2975; HASWELL-LABEL: test_pmovsxbd:
2976; HASWELL: # BB#0:
2977; HASWELL-NEXT: vpmovsxbd %xmm0, %ymm0 # sched: [3:1.00]
2978; HASWELL-NEXT: vpmovsxbd (%rdi), %ymm1 # sched: [3:1.00]
2979; HASWELL-NEXT: vpaddd %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
2980; HASWELL-NEXT: retq # sched: [2:1.00]
2981;
2982; SKYLAKE-LABEL: test_pmovsxbd:
2983; SKYLAKE: # BB#0:
2984; SKYLAKE-NEXT: vpmovsxbd %xmm0, %ymm0 # sched: [3:1.00]
2985; SKYLAKE-NEXT: vpmovsxbd (%rdi), %ymm1 # sched: [3:1.00]
2986; SKYLAKE-NEXT: vpaddd %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
2987; SKYLAKE-NEXT: retq # sched: [2:1.00]
2988;
2989; ZNVER1-LABEL: test_pmovsxbd:
2990; ZNVER1: # BB#0:
2991; ZNVER1-NEXT: vpmovsxbd (%rdi), %ymm1 # sched: [8:0.50]
2992; ZNVER1-NEXT: vpmovsxbd %xmm0, %ymm0 # sched: [1:0.25]
2993; ZNVER1-NEXT: vpaddd %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
2994; ZNVER1-NEXT: retq # sched: [1:0.50]
2995 %1 = shufflevector <16 x i8> %a0, <16 x i8> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
2996 %2 = sext <8 x i8> %1 to <8 x i32>
2997 %3 = load <16 x i8>, <16 x i8> *%a1, align 16
2998 %4 = shufflevector <16 x i8> %3, <16 x i8> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
2999 %5 = sext <8 x i8> %4 to <8 x i32>
3000 %6 = add <8 x i32> %2, %5
3001 ret <8 x i32> %6
3002}
3003
; NOTE(review): autogenerated sched-model test -- pins vpmovsxbq (xmm -> ymm sign-extend) for reg and mem sources plus the dependent vpaddq, per -mcpu; regenerate with update_llc_test_checks.py rather than hand-editing.
3004define <4 x i64> @test_pmovsxbq(<16 x i8> %a0, <16 x i8> *%a1) {
3005; GENERIC-LABEL: test_pmovsxbq:
3006; GENERIC: # BB#0:
3007; GENERIC-NEXT: vpmovsxbq %xmm0, %ymm0 # sched: [1:1.00]
3008; GENERIC-NEXT: vpmovsxbq (%rdi), %ymm1 # sched: [5:1.00]
3009; GENERIC-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
3010; GENERIC-NEXT: retq # sched: [1:1.00]
3011;
3012; HASWELL-LABEL: test_pmovsxbq:
3013; HASWELL: # BB#0:
3014; HASWELL-NEXT: vpmovsxbq %xmm0, %ymm0 # sched: [3:1.00]
3015; HASWELL-NEXT: vpmovsxbq (%rdi), %ymm1 # sched: [3:1.00]
3016; HASWELL-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
3017; HASWELL-NEXT: retq # sched: [2:1.00]
3018;
3019; SKYLAKE-LABEL: test_pmovsxbq:
3020; SKYLAKE: # BB#0:
3021; SKYLAKE-NEXT: vpmovsxbq %xmm0, %ymm0 # sched: [3:1.00]
3022; SKYLAKE-NEXT: vpmovsxbq (%rdi), %ymm1 # sched: [3:1.00]
3023; SKYLAKE-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
3024; SKYLAKE-NEXT: retq # sched: [2:1.00]
3025;
3026; ZNVER1-LABEL: test_pmovsxbq:
3027; ZNVER1: # BB#0:
3028; ZNVER1-NEXT: vpmovsxbq (%rdi), %ymm1 # sched: [8:0.50]
3029; ZNVER1-NEXT: vpmovsxbq %xmm0, %ymm0 # sched: [1:0.50]
3030; ZNVER1-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
3031; ZNVER1-NEXT: retq # sched: [1:0.50]
3032 %1 = shufflevector <16 x i8> %a0, <16 x i8> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
3033 %2 = sext <4 x i8> %1 to <4 x i64>
3034 %3 = load <16 x i8>, <16 x i8> *%a1, align 16
3035 %4 = shufflevector <16 x i8> %3, <16 x i8> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
3036 %5 = sext <4 x i8> %4 to <4 x i64>
3037 %6 = add <4 x i64> %2, %5
3038 ret <4 x i64> %6
3039}
3040
; NOTE(review): autogenerated sched-model test -- pins vpmovsxbw (xmm -> ymm sign-extend) for reg and mem sources plus the dependent vpaddw, per -mcpu; regenerate with update_llc_test_checks.py rather than hand-editing.
3041define <16 x i16> @test_pmovsxbw(<16 x i8> %a0, <16 x i8> *%a1) {
3042; GENERIC-LABEL: test_pmovsxbw:
3043; GENERIC: # BB#0:
3044; GENERIC-NEXT: vpmovsxbw %xmm0, %ymm0 # sched: [1:1.00]
3045; GENERIC-NEXT: vpmovsxbw (%rdi), %ymm1 # sched: [5:1.00]
3046; GENERIC-NEXT: vpaddw %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
3047; GENERIC-NEXT: retq # sched: [1:1.00]
3048;
3049; HASWELL-LABEL: test_pmovsxbw:
3050; HASWELL: # BB#0:
3051; HASWELL-NEXT: vpmovsxbw %xmm0, %ymm0 # sched: [3:1.00]
3052; HASWELL-NEXT: vpmovsxbw (%rdi), %ymm1 # sched: [3:1.00]
3053; HASWELL-NEXT: vpaddw %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
3054; HASWELL-NEXT: retq # sched: [2:1.00]
3055;
3056; SKYLAKE-LABEL: test_pmovsxbw:
3057; SKYLAKE: # BB#0:
3058; SKYLAKE-NEXT: vpmovsxbw %xmm0, %ymm0 # sched: [3:1.00]
3059; SKYLAKE-NEXT: vpmovsxbw (%rdi), %ymm1 # sched: [3:1.00]
3060; SKYLAKE-NEXT: vpaddw %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
3061; SKYLAKE-NEXT: retq # sched: [2:1.00]
3062;
3063; ZNVER1-LABEL: test_pmovsxbw:
3064; ZNVER1: # BB#0:
3065; ZNVER1-NEXT: vpmovsxbw (%rdi), %ymm1 # sched: [8:0.50]
3066; ZNVER1-NEXT: vpmovsxbw %xmm0, %ymm0 # sched: [1:0.50]
3067; ZNVER1-NEXT: vpaddw %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
3068; ZNVER1-NEXT: retq # sched: [1:0.50]
3069 %1 = sext <16 x i8> %a0 to <16 x i16>
3070 %2 = load <16 x i8>, <16 x i8> *%a1, align 16
3071 %3 = sext <16 x i8> %2 to <16 x i16>
3072 %4 = add <16 x i16> %1, %3
3073 ret <16 x i16> %4
3074}
3075
; NOTE(review): autogenerated sched-model test -- pins vpmovsxdq (xmm -> ymm sign-extend) for reg and mem sources plus the dependent vpaddq, per -mcpu; regenerate with update_llc_test_checks.py rather than hand-editing.
3076define <4 x i64> @test_pmovsxdq(<4 x i32> %a0, <4 x i32> *%a1) {
3077; GENERIC-LABEL: test_pmovsxdq:
3078; GENERIC: # BB#0:
3079; GENERIC-NEXT: vpmovsxdq %xmm0, %ymm0 # sched: [1:1.00]
3080; GENERIC-NEXT: vpmovsxdq (%rdi), %ymm1 # sched: [5:1.00]
3081; GENERIC-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
3082; GENERIC-NEXT: retq # sched: [1:1.00]
3083;
3084; HASWELL-LABEL: test_pmovsxdq:
3085; HASWELL: # BB#0:
3086; HASWELL-NEXT: vpmovsxdq %xmm0, %ymm0 # sched: [3:1.00]
3087; HASWELL-NEXT: vpmovsxdq (%rdi), %ymm1 # sched: [3:1.00]
3088; HASWELL-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
3089; HASWELL-NEXT: retq # sched: [2:1.00]
3090;
3091; SKYLAKE-LABEL: test_pmovsxdq:
3092; SKYLAKE: # BB#0:
3093; SKYLAKE-NEXT: vpmovsxdq %xmm0, %ymm0 # sched: [3:1.00]
3094; SKYLAKE-NEXT: vpmovsxdq (%rdi), %ymm1 # sched: [3:1.00]
3095; SKYLAKE-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
3096; SKYLAKE-NEXT: retq # sched: [2:1.00]
3097;
3098; ZNVER1-LABEL: test_pmovsxdq:
3099; ZNVER1: # BB#0:
3100; ZNVER1-NEXT: vpmovsxdq (%rdi), %ymm1 # sched: [8:0.50]
3101; ZNVER1-NEXT: vpmovsxdq %xmm0, %ymm0 # sched: [1:0.50]
3102; ZNVER1-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
3103; ZNVER1-NEXT: retq # sched: [1:0.50]
3104 %1 = sext <4 x i32> %a0 to <4 x i64>
3105 %2 = load <4 x i32>, <4 x i32> *%a1, align 16
3106 %3 = sext <4 x i32> %2 to <4 x i64>
3107 %4 = add <4 x i64> %1, %3
3108 ret <4 x i64> %4
3109}
3110
; NOTE(review): autogenerated sched-model test -- pins vpmovsxwd (xmm -> ymm sign-extend) for reg and mem sources plus the dependent vpaddd, per -mcpu; regenerate with update_llc_test_checks.py rather than hand-editing.
3111define <8 x i32> @test_pmovsxwd(<8 x i16> %a0, <8 x i16> *%a1) {
3112; GENERIC-LABEL: test_pmovsxwd:
3113; GENERIC: # BB#0:
3114; GENERIC-NEXT: vpmovsxwd %xmm0, %ymm0 # sched: [1:1.00]
3115; GENERIC-NEXT: vpmovsxwd (%rdi), %ymm1 # sched: [5:1.00]
3116; GENERIC-NEXT: vpaddd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
3117; GENERIC-NEXT: retq # sched: [1:1.00]
3118;
3119; HASWELL-LABEL: test_pmovsxwd:
3120; HASWELL: # BB#0:
3121; HASWELL-NEXT: vpmovsxwd %xmm0, %ymm0 # sched: [3:1.00]
3122; HASWELL-NEXT: vpmovsxwd (%rdi), %ymm1 # sched: [3:1.00]
3123; HASWELL-NEXT: vpaddd %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
3124; HASWELL-NEXT: retq # sched: [2:1.00]
3125;
3126; SKYLAKE-LABEL: test_pmovsxwd:
3127; SKYLAKE: # BB#0:
3128; SKYLAKE-NEXT: vpmovsxwd %xmm0, %ymm0 # sched: [3:1.00]
3129; SKYLAKE-NEXT: vpmovsxwd (%rdi), %ymm1 # sched: [3:1.00]
3130; SKYLAKE-NEXT: vpaddd %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
3131; SKYLAKE-NEXT: retq # sched: [2:1.00]
3132;
3133; ZNVER1-LABEL: test_pmovsxwd:
3134; ZNVER1: # BB#0:
3135; ZNVER1-NEXT: vpmovsxwd (%rdi), %ymm1 # sched: [8:0.50]
3136; ZNVER1-NEXT: vpmovsxwd %xmm0, %ymm0 # sched: [1:0.25]
3137; ZNVER1-NEXT: vpaddd %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
3138; ZNVER1-NEXT: retq # sched: [1:0.50]
3139 %1 = sext <8 x i16> %a0 to <8 x i32>
3140 %2 = load <8 x i16>, <8 x i16> *%a1, align 16
3141 %3 = sext <8 x i16> %2 to <8 x i32>
3142 %4 = add <8 x i32> %1, %3
3143 ret <8 x i32> %4
3144}
3145
; NOTE(review): autogenerated sched-model test -- pins vpmovsxwq (xmm -> ymm sign-extend) for reg and mem sources plus the dependent vpaddq, per -mcpu; regenerate with update_llc_test_checks.py rather than hand-editing.
3146define <4 x i64> @test_pmovsxwq(<8 x i16> %a0, <8 x i16> *%a1) {
3147; GENERIC-LABEL: test_pmovsxwq:
3148; GENERIC: # BB#0:
3149; GENERIC-NEXT: vpmovsxwq %xmm0, %ymm0 # sched: [1:1.00]
3150; GENERIC-NEXT: vpmovsxwq (%rdi), %ymm1 # sched: [5:1.00]
3151; GENERIC-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
3152; GENERIC-NEXT: retq # sched: [1:1.00]
3153;
3154; HASWELL-LABEL: test_pmovsxwq:
3155; HASWELL: # BB#0:
3156; HASWELL-NEXT: vpmovsxwq %xmm0, %ymm0 # sched: [3:1.00]
3157; HASWELL-NEXT: vpmovsxwq (%rdi), %ymm1 # sched: [3:1.00]
3158; HASWELL-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
3159; HASWELL-NEXT: retq # sched: [2:1.00]
3160;
3161; SKYLAKE-LABEL: test_pmovsxwq:
3162; SKYLAKE: # BB#0:
3163; SKYLAKE-NEXT: vpmovsxwq %xmm0, %ymm0 # sched: [3:1.00]
3164; SKYLAKE-NEXT: vpmovsxwq (%rdi), %ymm1 # sched: [3:1.00]
3165; SKYLAKE-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
3166; SKYLAKE-NEXT: retq # sched: [2:1.00]
3167;
3168; ZNVER1-LABEL: test_pmovsxwq:
3169; ZNVER1: # BB#0:
3170; ZNVER1-NEXT: vpmovsxwq (%rdi), %ymm1 # sched: [8:0.50]
3171; ZNVER1-NEXT: vpmovsxwq %xmm0, %ymm0 # sched: [1:0.25]
3172; ZNVER1-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
3173; ZNVER1-NEXT: retq # sched: [1:0.50]
3174 %1 = shufflevector <8 x i16> %a0, <8 x i16> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
3175 %2 = sext <4 x i16> %1 to <4 x i64>
3176 %3 = load <8 x i16>, <8 x i16> *%a1, align 16
3177 %4 = shufflevector <8 x i16> %3, <8 x i16> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
3178 %5 = sext <4 x i16> %4 to <4 x i64>
3179 %6 = add <4 x i64> %2, %5
3180 ret <4 x i64> %6
3181}
3182
; NOTE(review): autogenerated sched-model test -- pins vpmovzxbd (xmm -> ymm zero-extend, shuffle-decorated) for reg and mem sources plus the dependent vpaddd, per -mcpu; regenerate with update_llc_test_checks.py rather than hand-editing.
3183define <8 x i32> @test_pmovzxbd(<16 x i8> %a0, <16 x i8> *%a1) {
3184; GENERIC-LABEL: test_pmovzxbd:
3185; GENERIC: # BB#0:
3186; GENERIC-NEXT: vpmovzxbd {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero sched: [1:1.00]
3187; GENERIC-NEXT: vpmovzxbd {{.*#+}} ymm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero sched: [5:1.00]
3188; GENERIC-NEXT: vpaddd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
3189; GENERIC-NEXT: retq # sched: [1:1.00]
3190;
3191; HASWELL-LABEL: test_pmovzxbd:
3192; HASWELL: # BB#0:
3193; HASWELL-NEXT: vpmovzxbd {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero sched: [3:1.00]
3194; HASWELL-NEXT: vpmovzxbd {{.*#+}} ymm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero sched: [3:1.00]
3195; HASWELL-NEXT: vpaddd %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
3196; HASWELL-NEXT: retq # sched: [2:1.00]
3197;
3198; SKYLAKE-LABEL: test_pmovzxbd:
3199; SKYLAKE: # BB#0:
3200; SKYLAKE-NEXT: vpmovzxbd {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero sched: [3:1.00]
3201; SKYLAKE-NEXT: vpmovzxbd {{.*#+}} ymm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero sched: [3:1.00]
3202; SKYLAKE-NEXT: vpaddd %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
3203; SKYLAKE-NEXT: retq # sched: [2:1.00]
3204;
3205; ZNVER1-LABEL: test_pmovzxbd:
3206; ZNVER1: # BB#0:
3207; ZNVER1-NEXT: vpmovzxbd {{.*#+}} ymm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero sched: [8:0.50]
3208; ZNVER1-NEXT: vpmovzxbd {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero sched: [1:0.25]
3209; ZNVER1-NEXT: vpaddd %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
3210; ZNVER1-NEXT: retq # sched: [1:0.50]
3211 %1 = shufflevector <16 x i8> %a0, <16 x i8> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
3212 %2 = zext <8 x i8> %1 to <8 x i32>
3213 %3 = load <16 x i8>, <16 x i8> *%a1, align 16
3214 %4 = shufflevector <16 x i8> %3, <16 x i8> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
3215 %5 = zext <8 x i8> %4 to <8 x i32>
3216 %6 = add <8 x i32> %2, %5
3217 ret <8 x i32> %6
3218}
3219
; NOTE(review): autogenerated sched-model test -- pins vpmovzxbq (xmm -> ymm zero-extend, shuffle-decorated) for reg and mem sources plus the dependent vpaddq, per -mcpu; regenerate with update_llc_test_checks.py rather than hand-editing.
3220define <4 x i64> @test_pmovzxbq(<16 x i8> %a0, <16 x i8> *%a1) {
3221; GENERIC-LABEL: test_pmovzxbq:
3222; GENERIC: # BB#0:
3223; GENERIC-NEXT: vpmovzxbq {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero,xmm0[2],zero,zero,zero,zero,zero,zero,zero,xmm0[3],zero,zero,zero,zero,zero,zero,zero sched: [1:1.00]
3224; GENERIC-NEXT: vpmovzxbq {{.*#+}} ymm1 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero,mem[2],zero,zero,zero,zero,zero,zero,zero,mem[3],zero,zero,zero,zero,zero,zero,zero sched: [5:1.00]
3225; GENERIC-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
3226; GENERIC-NEXT: retq # sched: [1:1.00]
3227;
3228; HASWELL-LABEL: test_pmovzxbq:
3229; HASWELL: # BB#0:
3230; HASWELL-NEXT: vpmovzxbq {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero,xmm0[2],zero,zero,zero,zero,zero,zero,zero,xmm0[3],zero,zero,zero,zero,zero,zero,zero sched: [3:1.00]
3231; HASWELL-NEXT: vpmovzxbq {{.*#+}} ymm1 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero,mem[2],zero,zero,zero,zero,zero,zero,zero,mem[3],zero,zero,zero,zero,zero,zero,zero sched: [3:1.00]
3232; HASWELL-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
3233; HASWELL-NEXT: retq # sched: [2:1.00]
3234;
3235; SKYLAKE-LABEL: test_pmovzxbq:
3236; SKYLAKE: # BB#0:
3237; SKYLAKE-NEXT: vpmovzxbq {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero,xmm0[2],zero,zero,zero,zero,zero,zero,zero,xmm0[3],zero,zero,zero,zero,zero,zero,zero sched: [3:1.00]
3238; SKYLAKE-NEXT: vpmovzxbq {{.*#+}} ymm1 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero,mem[2],zero,zero,zero,zero,zero,zero,zero,mem[3],zero,zero,zero,zero,zero,zero,zero sched: [3:1.00]
3239; SKYLAKE-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
3240; SKYLAKE-NEXT: retq # sched: [2:1.00]
3241;
3242; ZNVER1-LABEL: test_pmovzxbq:
3243; ZNVER1: # BB#0:
3244; ZNVER1-NEXT: vpmovzxbq {{.*#+}} ymm1 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero,mem[2],zero,zero,zero,zero,zero,zero,zero,mem[3],zero,zero,zero,zero,zero,zero,zero sched: [8:0.50]
3245; ZNVER1-NEXT: vpmovzxbq {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero,xmm0[2],zero,zero,zero,zero,zero,zero,zero,xmm0[3],zero,zero,zero,zero,zero,zero,zero sched: [1:0.50]
3246; ZNVER1-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
3247; ZNVER1-NEXT: retq # sched: [1:0.50]
3248 %1 = shufflevector <16 x i8> %a0, <16 x i8> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
3249 %2 = zext <4 x i8> %1 to <4 x i64>
3250 %3 = load <16 x i8>, <16 x i8> *%a1, align 16
3251 %4 = shufflevector <16 x i8> %3, <16 x i8> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
3252 %5 = zext <4 x i8> %4 to <4 x i64>
3253 %6 = add <4 x i64> %2, %5
3254 ret <4 x i64> %6
3255}
3256
; NOTE(review): autogenerated sched-model test -- pins vpmovzxbw (xmm -> ymm zero-extend, shuffle-decorated) for reg and mem sources plus the dependent vpaddw, per -mcpu; regenerate with update_llc_test_checks.py rather than hand-editing.
3257define <16 x i16> @test_pmovzxbw(<16 x i8> %a0, <16 x i8> *%a1) {
3258; GENERIC-LABEL: test_pmovzxbw:
3259; GENERIC: # BB#0:
3260; GENERIC-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero sched: [1:1.00]
3261; GENERIC-NEXT: vpmovzxbw {{.*#+}} ymm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero sched: [5:1.00]
3262; GENERIC-NEXT: vpaddw %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
3263; GENERIC-NEXT: retq # sched: [1:1.00]
3264;
3265; HASWELL-LABEL: test_pmovzxbw:
3266; HASWELL: # BB#0:
3267; HASWELL-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero sched: [3:1.00]
3268; HASWELL-NEXT: vpmovzxbw {{.*#+}} ymm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero sched: [3:1.00]
3269; HASWELL-NEXT: vpaddw %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
3270; HASWELL-NEXT: retq # sched: [2:1.00]
3271;
3272; SKYLAKE-LABEL: test_pmovzxbw:
3273; SKYLAKE: # BB#0:
3274; SKYLAKE-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero sched: [3:1.00]
3275; SKYLAKE-NEXT: vpmovzxbw {{.*#+}} ymm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero sched: [3:1.00]
3276; SKYLAKE-NEXT: vpaddw %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
3277; SKYLAKE-NEXT: retq # sched: [2:1.00]
3278;
3279; ZNVER1-LABEL: test_pmovzxbw:
3280; ZNVER1: # BB#0:
3281; ZNVER1-NEXT: vpmovzxbw {{.*#+}} ymm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero sched: [8:0.50]
3282; ZNVER1-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero sched: [1:0.50]
3283; ZNVER1-NEXT: vpaddw %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
3284; ZNVER1-NEXT: retq # sched: [1:0.50]
3285 %1 = zext <16 x i8> %a0 to <16 x i16>
3286 %2 = load <16 x i8>, <16 x i8> *%a1, align 16
3287 %3 = zext <16 x i8> %2 to <16 x i16>
3288 %4 = add <16 x i16> %1, %3
3289 ret <16 x i16> %4
3290}
3291
; NOTE(review): autogenerated sched-model test -- pins vpmovzxdq (xmm -> ymm zero-extend, shuffle-decorated) for reg and mem sources plus the dependent vpaddq, per -mcpu; regenerate with update_llc_test_checks.py rather than hand-editing.
3292define <4 x i64> @test_pmovzxdq(<4 x i32> %a0, <4 x i32> *%a1) {
3293; GENERIC-LABEL: test_pmovzxdq:
3294; GENERIC: # BB#0:
3295; GENERIC-NEXT: vpmovzxdq {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero sched: [1:1.00]
3296; GENERIC-NEXT: vpmovzxdq {{.*#+}} ymm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero sched: [5:1.00]
3297; GENERIC-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
3298; GENERIC-NEXT: retq # sched: [1:1.00]
3299;
3300; HASWELL-LABEL: test_pmovzxdq:
3301; HASWELL: # BB#0:
3302; HASWELL-NEXT: vpmovzxdq {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero sched: [3:1.00]
3303; HASWELL-NEXT: vpmovzxdq {{.*#+}} ymm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero sched: [3:1.00]
3304; HASWELL-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
3305; HASWELL-NEXT: retq # sched: [2:1.00]
3306;
3307; SKYLAKE-LABEL: test_pmovzxdq:
3308; SKYLAKE: # BB#0:
3309; SKYLAKE-NEXT: vpmovzxdq {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero sched: [3:1.00]
3310; SKYLAKE-NEXT: vpmovzxdq {{.*#+}} ymm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero sched: [3:1.00]
3311; SKYLAKE-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
3312; SKYLAKE-NEXT: retq # sched: [2:1.00]
3313;
3314; ZNVER1-LABEL: test_pmovzxdq:
3315; ZNVER1: # BB#0:
3316; ZNVER1-NEXT: vpmovzxdq {{.*#+}} ymm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero sched: [8:0.50]
3317; ZNVER1-NEXT: vpmovzxdq {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero sched: [1:0.50]
3318; ZNVER1-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
3319; ZNVER1-NEXT: retq # sched: [1:0.50]
3320 %1 = zext <4 x i32> %a0 to <4 x i64>
3321 %2 = load <4 x i32>, <4 x i32> *%a1, align 16
3322 %3 = zext <4 x i32> %2 to <4 x i64>
3323 %4 = add <4 x i64> %1, %3
3324 ret <4 x i64> %4
3325}
3326
; NOTE(review): autogenerated sched-model test -- pins vpmovzxwd (xmm -> ymm zero-extend, shuffle-decorated) for reg and mem sources plus the dependent vpaddd, per -mcpu; regenerate with update_llc_test_checks.py rather than hand-editing.
3327define <8 x i32> @test_pmovzxwd(<8 x i16> %a0, <8 x i16> *%a1) {
3328; GENERIC-LABEL: test_pmovzxwd:
3329; GENERIC: # BB#0:
3330; GENERIC-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero sched: [1:1.00]
3331; GENERIC-NEXT: vpmovzxwd {{.*#+}} ymm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero sched: [5:1.00]
3332; GENERIC-NEXT: vpaddd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
3333; GENERIC-NEXT: retq # sched: [1:1.00]
3334;
3335; HASWELL-LABEL: test_pmovzxwd:
3336; HASWELL: # BB#0:
3337; HASWELL-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero sched: [3:1.00]
3338; HASWELL-NEXT: vpmovzxwd {{.*#+}} ymm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero sched: [3:1.00]
3339; HASWELL-NEXT: vpaddd %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
3340; HASWELL-NEXT: retq # sched: [2:1.00]
3341;
3342; SKYLAKE-LABEL: test_pmovzxwd:
3343; SKYLAKE: # BB#0:
3344; SKYLAKE-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero sched: [3:1.00]
3345; SKYLAKE-NEXT: vpmovzxwd {{.*#+}} ymm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero sched: [3:1.00]
3346; SKYLAKE-NEXT: vpaddd %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
3347; SKYLAKE-NEXT: retq # sched: [2:1.00]
3348;
3349; ZNVER1-LABEL: test_pmovzxwd:
3350; ZNVER1: # BB#0:
3351; ZNVER1-NEXT: vpmovzxwd {{.*#+}} ymm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero sched: [8:0.50]
3352; ZNVER1-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero sched: [1:0.25]
3353; ZNVER1-NEXT: vpaddd %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
3354; ZNVER1-NEXT: retq # sched: [1:0.50]
3355 %1 = zext <8 x i16> %a0 to <8 x i32>
3356 %2 = load <8 x i16>, <8 x i16> *%a1, align 16
3357 %3 = zext <8 x i16> %2 to <8 x i32>
3358 %4 = add <8 x i32> %1, %3
3359 ret <8 x i32> %4
3360}
3361
3362define <4 x i64> @test_pmovzxwq(<8 x i16> %a0, <8 x i16> *%a1) {
3363; GENERIC-LABEL: test_pmovzxwq:
3364; GENERIC: # BB#0:
3365; GENERIC-NEXT: vpmovzxwq {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero sched: [1:1.00]
3366; GENERIC-NEXT: vpmovzxwq {{.*#+}} ymm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero sched: [5:1.00]
3367; GENERIC-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
3368; GENERIC-NEXT: retq # sched: [1:1.00]
3369;
3370; HASWELL-LABEL: test_pmovzxwq:
3371; HASWELL: # BB#0:
3372; HASWELL-NEXT: vpmovzxwq {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero sched: [3:1.00]
3373; HASWELL-NEXT: vpmovzxwq {{.*#+}} ymm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero sched: [3:1.00]
3374; HASWELL-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
3375; HASWELL-NEXT: retq # sched: [2:1.00]
3376;
3377; SKYLAKE-LABEL: test_pmovzxwq:
3378; SKYLAKE: # BB#0:
3379; SKYLAKE-NEXT: vpmovzxwq {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero sched: [3:1.00]
3380; SKYLAKE-NEXT: vpmovzxwq {{.*#+}} ymm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero sched: [3:1.00]
3381; SKYLAKE-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
3382; SKYLAKE-NEXT: retq # sched: [2:1.00]
3383;
3384; ZNVER1-LABEL: test_pmovzxwq:
3385; ZNVER1: # BB#0:
3386; ZNVER1-NEXT: vpmovzxwq {{.*#+}} ymm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero sched: [8:0.50]
3387; ZNVER1-NEXT: vpmovzxwq {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero sched: [1:0.25]
3388; ZNVER1-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
3389; ZNVER1-NEXT: retq # sched: [1:0.50]
3390 %1 = shufflevector <8 x i16> %a0, <8 x i16> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
3391 %2 = zext <4 x i16> %1 to <4 x i64>
3392 %3 = load <8 x i16>, <8 x i16> *%a1, align 16
3393 %4 = shufflevector <8 x i16> %3, <8 x i16> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
3394 %5 = zext <4 x i16> %4 to <4 x i64>
3395 %6 = add <4 x i64> %2, %5
3396 ret <4 x i64> %6
3397}
3398
3399define <4 x i64> @test_pmuldq(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> *%a2) {
3400; GENERIC-LABEL: test_pmuldq:
3401; GENERIC: # BB#0:
3402; GENERIC-NEXT: vpmuldq %ymm1, %ymm0, %ymm0 # sched: [5:1.00]
3403; GENERIC-NEXT: vpmuldq (%rdi), %ymm0, %ymm0 # sched: [9:1.00]
3404; GENERIC-NEXT: retq # sched: [1:1.00]
3405;
3406; HASWELL-LABEL: test_pmuldq:
3407; HASWELL: # BB#0:
3408; HASWELL-NEXT: vpmuldq %ymm1, %ymm0, %ymm0 # sched: [5:1.00]
3409; HASWELL-NEXT: vpmuldq (%rdi), %ymm0, %ymm0 # sched: [5:1.00]
3410; HASWELL-NEXT: retq # sched: [2:1.00]
3411;
3412; SKYLAKE-LABEL: test_pmuldq:
3413; SKYLAKE: # BB#0:
3414; SKYLAKE-NEXT: vpmuldq %ymm1, %ymm0, %ymm0 # sched: [5:1.00]
3415; SKYLAKE-NEXT: vpmuldq (%rdi), %ymm0, %ymm0 # sched: [5:1.00]
3416; SKYLAKE-NEXT: retq # sched: [2:1.00]
3417;
3418; ZNVER1-LABEL: test_pmuldq:
3419; ZNVER1: # BB#0:
3420; ZNVER1-NEXT: vpmuldq %ymm1, %ymm0, %ymm0 # sched: [4:1.00]
3421; ZNVER1-NEXT: vpmuldq (%rdi), %ymm0, %ymm0 # sched: [11:1.00]
3422; ZNVER1-NEXT: retq # sched: [1:0.50]
3423 %1 = call <4 x i64> @llvm.x86.avx2.pmul.dq(<8 x i32> %a0, <8 x i32> %a1)
3424 %2 = bitcast <4 x i64> %1 to <8 x i32>
3425 %3 = load <8 x i32>, <8 x i32> *%a2, align 32
3426 %4 = call <4 x i64> @llvm.x86.avx2.pmul.dq(<8 x i32> %2, <8 x i32> %3)
3427 ret <4 x i64> %4
3428}
3429declare <4 x i64> @llvm.x86.avx2.pmul.dq(<8 x i32>, <8 x i32>) nounwind readnone
3430
3431define <16 x i16> @test_pmulhrsw(<16 x i16> %a0, <16 x i16> %a1, <16 x i16> *%a2) {
3432; GENERIC-LABEL: test_pmulhrsw:
3433; GENERIC: # BB#0:
3434; GENERIC-NEXT: vpmulhrsw %ymm1, %ymm0, %ymm0 # sched: [5:1.00]
3435; GENERIC-NEXT: vpmulhrsw (%rdi), %ymm0, %ymm0 # sched: [9:1.00]
3436; GENERIC-NEXT: retq # sched: [1:1.00]
3437;
3438; HASWELL-LABEL: test_pmulhrsw:
3439; HASWELL: # BB#0:
3440; HASWELL-NEXT: vpmulhrsw %ymm1, %ymm0, %ymm0 # sched: [5:1.00]
3441; HASWELL-NEXT: vpmulhrsw (%rdi), %ymm0, %ymm0 # sched: [5:1.00]
3442; HASWELL-NEXT: retq # sched: [2:1.00]
3443;
3444; SKYLAKE-LABEL: test_pmulhrsw:
3445; SKYLAKE: # BB#0:
3446; SKYLAKE-NEXT: vpmulhrsw %ymm1, %ymm0, %ymm0 # sched: [5:1.00]
3447; SKYLAKE-NEXT: vpmulhrsw (%rdi), %ymm0, %ymm0 # sched: [5:1.00]
3448; SKYLAKE-NEXT: retq # sched: [2:1.00]
3449;
3450; ZNVER1-LABEL: test_pmulhrsw:
3451; ZNVER1: # BB#0:
3452; ZNVER1-NEXT: vpmulhrsw %ymm1, %ymm0, %ymm0 # sched: [4:1.00]
3453; ZNVER1-NEXT: vpmulhrsw (%rdi), %ymm0, %ymm0 # sched: [11:1.00]
3454; ZNVER1-NEXT: retq # sched: [1:0.50]
3455 %1 = call <16 x i16> @llvm.x86.avx2.pmul.hr.sw(<16 x i16> %a0, <16 x i16> %a1)
3456 %2 = load <16 x i16>, <16 x i16> *%a2, align 32
3457 %3 = call <16 x i16> @llvm.x86.avx2.pmul.hr.sw(<16 x i16> %1, <16 x i16> %2)
3458 ret <16 x i16> %3
3459}
3460declare <16 x i16> @llvm.x86.avx2.pmul.hr.sw(<16 x i16>, <16 x i16>) nounwind readnone
3461
3462define <16 x i16> @test_pmulhuw(<16 x i16> %a0, <16 x i16> %a1, <16 x i16> *%a2) {
3463; GENERIC-LABEL: test_pmulhuw:
3464; GENERIC: # BB#0:
3465; GENERIC-NEXT: vpmulhuw %ymm1, %ymm0, %ymm0 # sched: [5:1.00]
3466; GENERIC-NEXT: vpmulhuw (%rdi), %ymm0, %ymm0 # sched: [9:1.00]
3467; GENERIC-NEXT: retq # sched: [1:1.00]
3468;
3469; HASWELL-LABEL: test_pmulhuw:
3470; HASWELL: # BB#0:
3471; HASWELL-NEXT: vpmulhuw %ymm1, %ymm0, %ymm0 # sched: [5:1.00]
3472; HASWELL-NEXT: vpmulhuw (%rdi), %ymm0, %ymm0 # sched: [5:1.00]
3473; HASWELL-NEXT: retq # sched: [2:1.00]
3474;
3475; SKYLAKE-LABEL: test_pmulhuw:
3476; SKYLAKE: # BB#0:
3477; SKYLAKE-NEXT: vpmulhuw %ymm1, %ymm0, %ymm0 # sched: [5:1.00]
3478; SKYLAKE-NEXT: vpmulhuw (%rdi), %ymm0, %ymm0 # sched: [5:1.00]
3479; SKYLAKE-NEXT: retq # sched: [2:1.00]
3480;
3481; ZNVER1-LABEL: test_pmulhuw:
3482; ZNVER1: # BB#0:
3483; ZNVER1-NEXT: vpmulhuw %ymm1, %ymm0, %ymm0 # sched: [4:1.00]
3484; ZNVER1-NEXT: vpmulhuw (%rdi), %ymm0, %ymm0 # sched: [11:1.00]
3485; ZNVER1-NEXT: retq # sched: [1:0.50]
3486 %1 = call <16 x i16> @llvm.x86.avx2.pmulhu.w(<16 x i16> %a0, <16 x i16> %a1)
3487 %2 = load <16 x i16>, <16 x i16> *%a2, align 32
3488 %3 = call <16 x i16> @llvm.x86.avx2.pmulhu.w(<16 x i16> %1, <16 x i16> %2)
3489 ret <16 x i16> %3
3490}
3491declare <16 x i16> @llvm.x86.avx2.pmulhu.w(<16 x i16>, <16 x i16>) nounwind readnone
3492
3493define <16 x i16> @test_pmulhw(<16 x i16> %a0, <16 x i16> %a1, <16 x i16> *%a2) {
3494; GENERIC-LABEL: test_pmulhw:
3495; GENERIC: # BB#0:
3496; GENERIC-NEXT: vpmulhw %ymm1, %ymm0, %ymm0 # sched: [5:1.00]
3497; GENERIC-NEXT: vpmulhw (%rdi), %ymm0, %ymm0 # sched: [9:1.00]
3498; GENERIC-NEXT: retq # sched: [1:1.00]
3499;
3500; HASWELL-LABEL: test_pmulhw:
3501; HASWELL: # BB#0:
3502; HASWELL-NEXT: vpmulhw %ymm1, %ymm0, %ymm0 # sched: [5:1.00]
3503; HASWELL-NEXT: vpmulhw (%rdi), %ymm0, %ymm0 # sched: [5:1.00]
3504; HASWELL-NEXT: retq # sched: [2:1.00]
3505;
3506; SKYLAKE-LABEL: test_pmulhw:
3507; SKYLAKE: # BB#0:
3508; SKYLAKE-NEXT: vpmulhw %ymm1, %ymm0, %ymm0 # sched: [5:1.00]
3509; SKYLAKE-NEXT: vpmulhw (%rdi), %ymm0, %ymm0 # sched: [5:1.00]
3510; SKYLAKE-NEXT: retq # sched: [2:1.00]
3511;
3512; ZNVER1-LABEL: test_pmulhw:
3513; ZNVER1: # BB#0:
3514; ZNVER1-NEXT: vpmulhw %ymm1, %ymm0, %ymm0 # sched: [4:1.00]
3515; ZNVER1-NEXT: vpmulhw (%rdi), %ymm0, %ymm0 # sched: [11:1.00]
3516; ZNVER1-NEXT: retq # sched: [1:0.50]
3517 %1 = call <16 x i16> @llvm.x86.avx2.pmulh.w(<16 x i16> %a0, <16 x i16> %a1)
3518 %2 = load <16 x i16>, <16 x i16> *%a2, align 32
3519 %3 = call <16 x i16> @llvm.x86.avx2.pmulh.w(<16 x i16> %1, <16 x i16> %2)
3520 ret <16 x i16> %3
3521}
3522declare <16 x i16> @llvm.x86.avx2.pmulh.w(<16 x i16>, <16 x i16>) nounwind readnone
3523
Simon Pilgrim946f08c2017-05-06 13:46:09 +00003524define <8 x i32> @test_pmulld(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> *%a2) {
Simon Pilgrim84846982017-08-01 15:14:35 +00003525; GENERIC-LABEL: test_pmulld:
3526; GENERIC: # BB#0:
3527; GENERIC-NEXT: vpmulld %ymm1, %ymm0, %ymm0 # sched: [5:1.00]
3528; GENERIC-NEXT: vpmulld (%rdi), %ymm0, %ymm0 # sched: [9:1.00]
3529; GENERIC-NEXT: retq # sched: [1:1.00]
3530;
Simon Pilgrim946f08c2017-05-06 13:46:09 +00003531; HASWELL-LABEL: test_pmulld:
3532; HASWELL: # BB#0:
3533; HASWELL-NEXT: vpmulld %ymm1, %ymm0, %ymm0 # sched: [10:2.00]
3534; HASWELL-NEXT: vpmulld (%rdi), %ymm0, %ymm0 # sched: [10:2.00]
Gadi Haberd76f7b82017-08-28 10:04:16 +00003535; HASWELL-NEXT: retq # sched: [2:1.00]
Simon Pilgrim946f08c2017-05-06 13:46:09 +00003536;
Gadi Haber767d98b2017-08-30 08:08:50 +00003537; SKYLAKE-LABEL: test_pmulld:
3538; SKYLAKE: # BB#0:
3539; SKYLAKE-NEXT: vpmulld %ymm1, %ymm0, %ymm0 # sched: [10:2.00]
3540; SKYLAKE-NEXT: vpmulld (%rdi), %ymm0, %ymm0 # sched: [10:2.00]
3541; SKYLAKE-NEXT: retq # sched: [2:1.00]
3542;
Simon Pilgrim946f08c2017-05-06 13:46:09 +00003543; ZNVER1-LABEL: test_pmulld:
3544; ZNVER1: # BB#0:
Ashutosh Nemabfcac0b2017-08-31 12:38:35 +00003545; ZNVER1-NEXT: vpmulld %ymm1, %ymm0, %ymm0 # sched: [5:2.00]
3546; ZNVER1-NEXT: vpmulld (%rdi), %ymm0, %ymm0 # sched: [12:2.00]
3547; ZNVER1-NEXT: retq # sched: [1:0.50]
Simon Pilgrim946f08c2017-05-06 13:46:09 +00003548 %1 = mul <8 x i32> %a0, %a1
3549 %2 = load <8 x i32>, <8 x i32> *%a2, align 32
3550 %3 = mul <8 x i32> %1, %2
3551 ret <8 x i32> %3
3552}
3553
3554define <16 x i16> @test_pmullw(<16 x i16> %a0, <16 x i16> %a1, <16 x i16> *%a2) {
Simon Pilgrim84846982017-08-01 15:14:35 +00003555; GENERIC-LABEL: test_pmullw:
3556; GENERIC: # BB#0:
3557; GENERIC-NEXT: vpmullw %ymm1, %ymm0, %ymm0 # sched: [5:1.00]
3558; GENERIC-NEXT: vpmullw (%rdi), %ymm0, %ymm0 # sched: [9:1.00]
3559; GENERIC-NEXT: retq # sched: [1:1.00]
3560;
Simon Pilgrim946f08c2017-05-06 13:46:09 +00003561; HASWELL-LABEL: test_pmullw:
3562; HASWELL: # BB#0:
3563; HASWELL-NEXT: vpmullw %ymm1, %ymm0, %ymm0 # sched: [5:1.00]
Gadi Haberd76f7b82017-08-28 10:04:16 +00003564; HASWELL-NEXT: vpmullw (%rdi), %ymm0, %ymm0 # sched: [5:1.00]
3565; HASWELL-NEXT: retq # sched: [2:1.00]
Simon Pilgrim946f08c2017-05-06 13:46:09 +00003566;
Gadi Haber767d98b2017-08-30 08:08:50 +00003567; SKYLAKE-LABEL: test_pmullw:
3568; SKYLAKE: # BB#0:
3569; SKYLAKE-NEXT: vpmullw %ymm1, %ymm0, %ymm0 # sched: [5:1.00]
3570; SKYLAKE-NEXT: vpmullw (%rdi), %ymm0, %ymm0 # sched: [5:1.00]
3571; SKYLAKE-NEXT: retq # sched: [2:1.00]
3572;
Simon Pilgrim946f08c2017-05-06 13:46:09 +00003573; ZNVER1-LABEL: test_pmullw:
3574; ZNVER1: # BB#0:
Craig Topper106b5b62017-07-19 02:45:14 +00003575; ZNVER1-NEXT: vpmullw %ymm1, %ymm0, %ymm0 # sched: [4:1.00]
3576; ZNVER1-NEXT: vpmullw (%rdi), %ymm0, %ymm0 # sched: [11:1.00]
Ashutosh Nemabfcac0b2017-08-31 12:38:35 +00003577; ZNVER1-NEXT: retq # sched: [1:0.50]
Simon Pilgrim946f08c2017-05-06 13:46:09 +00003578 %1 = mul <16 x i16> %a0, %a1
3579 %2 = load <16 x i16>, <16 x i16> *%a2, align 32
3580 %3 = mul <16 x i16> %1, %2
3581 ret <16 x i16> %3
3582}
3583
Simon Pilgrim0af5a7722017-09-12 15:01:20 +00003584define <4 x i64> @test_pmuludq(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> *%a2) {
3585; GENERIC-LABEL: test_pmuludq:
3586; GENERIC: # BB#0:
3587; GENERIC-NEXT: vpmuludq %ymm1, %ymm0, %ymm0 # sched: [5:1.00]
3588; GENERIC-NEXT: vpmuludq (%rdi), %ymm0, %ymm0 # sched: [9:1.00]
3589; GENERIC-NEXT: retq # sched: [1:1.00]
3590;
3591; HASWELL-LABEL: test_pmuludq:
3592; HASWELL: # BB#0:
3593; HASWELL-NEXT: vpmuludq %ymm1, %ymm0, %ymm0 # sched: [5:1.00]
3594; HASWELL-NEXT: vpmuludq (%rdi), %ymm0, %ymm0 # sched: [5:1.00]
3595; HASWELL-NEXT: retq # sched: [2:1.00]
3596;
3597; SKYLAKE-LABEL: test_pmuludq:
3598; SKYLAKE: # BB#0:
3599; SKYLAKE-NEXT: vpmuludq %ymm1, %ymm0, %ymm0 # sched: [5:1.00]
3600; SKYLAKE-NEXT: vpmuludq (%rdi), %ymm0, %ymm0 # sched: [5:1.00]
3601; SKYLAKE-NEXT: retq # sched: [2:1.00]
3602;
3603; ZNVER1-LABEL: test_pmuludq:
3604; ZNVER1: # BB#0:
3605; ZNVER1-NEXT: vpmuludq %ymm1, %ymm0, %ymm0 # sched: [4:1.00]
3606; ZNVER1-NEXT: vpmuludq (%rdi), %ymm0, %ymm0 # sched: [11:1.00]
3607; ZNVER1-NEXT: retq # sched: [1:0.50]
3608 %1 = call <4 x i64> @llvm.x86.avx2.pmulu.dq(<8 x i32> %a0, <8 x i32> %a1)
3609 %2 = bitcast <4 x i64> %1 to <8 x i32>
3610 %3 = load <8 x i32>, <8 x i32> *%a2, align 32
3611 %4 = call <4 x i64> @llvm.x86.avx2.pmulu.dq(<8 x i32> %2, <8 x i32> %3)
3612 ret <4 x i64> %4
3613}
3614declare <4 x i64> @llvm.x86.avx2.pmulu.dq(<8 x i32>, <8 x i32>) nounwind readnone
3615
Simon Pilgrim946f08c2017-05-06 13:46:09 +00003616define <4 x i64> @test_por(<4 x i64> %a0, <4 x i64> %a1, <4 x i64> *%a2) {
Simon Pilgrim84846982017-08-01 15:14:35 +00003617; GENERIC-LABEL: test_por:
3618; GENERIC: # BB#0:
3619; GENERIC-NEXT: vpor %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
3620; GENERIC-NEXT: vpor (%rdi), %ymm0, %ymm0 # sched: [5:1.00]
3621; GENERIC-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
3622; GENERIC-NEXT: retq # sched: [1:1.00]
3623;
Simon Pilgrim946f08c2017-05-06 13:46:09 +00003624; HASWELL-LABEL: test_por:
3625; HASWELL: # BB#0:
3626; HASWELL-NEXT: vpor %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
Gadi Haberd76f7b82017-08-28 10:04:16 +00003627; HASWELL-NEXT: vpor (%rdi), %ymm0, %ymm0 # sched: [1:0.50]
Simon Pilgrim946f08c2017-05-06 13:46:09 +00003628; HASWELL-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
Gadi Haberd76f7b82017-08-28 10:04:16 +00003629; HASWELL-NEXT: retq # sched: [2:1.00]
Simon Pilgrim946f08c2017-05-06 13:46:09 +00003630;
Gadi Haber767d98b2017-08-30 08:08:50 +00003631; SKYLAKE-LABEL: test_por:
3632; SKYLAKE: # BB#0:
3633; SKYLAKE-NEXT: vpor %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
3634; SKYLAKE-NEXT: vpor (%rdi), %ymm0, %ymm0 # sched: [1:0.50]
3635; SKYLAKE-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
3636; SKYLAKE-NEXT: retq # sched: [2:1.00]
3637;
Simon Pilgrim946f08c2017-05-06 13:46:09 +00003638; ZNVER1-LABEL: test_por:
3639; ZNVER1: # BB#0:
Craig Topper106b5b62017-07-19 02:45:14 +00003640; ZNVER1-NEXT: vpor %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
3641; ZNVER1-NEXT: vpor (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
3642; ZNVER1-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
Ashutosh Nemabfcac0b2017-08-31 12:38:35 +00003643; ZNVER1-NEXT: retq # sched: [1:0.50]
Simon Pilgrim946f08c2017-05-06 13:46:09 +00003644 %1 = or <4 x i64> %a0, %a1
3645 %2 = load <4 x i64>, <4 x i64> *%a2, align 32
3646 %3 = or <4 x i64> %1, %2
3647 %4 = add <4 x i64> %3, %a1
3648 ret <4 x i64> %4
3649}
3650
Simon Pilgrim0af5a7722017-09-12 15:01:20 +00003651define <4 x i64> @test_psadbw(<32 x i8> %a0, <32 x i8> %a1, <32 x i8> *%a2) {
3652; GENERIC-LABEL: test_psadbw:
3653; GENERIC: # BB#0:
3654; GENERIC-NEXT: vpsadbw %ymm1, %ymm0, %ymm0 # sched: [5:1.00]
3655; GENERIC-NEXT: vpsadbw (%rdi), %ymm0, %ymm0 # sched: [9:1.00]
3656; GENERIC-NEXT: retq # sched: [1:1.00]
3657;
3658; HASWELL-LABEL: test_psadbw:
3659; HASWELL: # BB#0:
3660; HASWELL-NEXT: vpsadbw %ymm1, %ymm0, %ymm0 # sched: [5:1.00]
3661; HASWELL-NEXT: vpsadbw (%rdi), %ymm0, %ymm0 # sched: [5:1.00]
3662; HASWELL-NEXT: retq # sched: [2:1.00]
3663;
3664; SKYLAKE-LABEL: test_psadbw:
3665; SKYLAKE: # BB#0:
3666; SKYLAKE-NEXT: vpsadbw %ymm1, %ymm0, %ymm0 # sched: [5:1.00]
3667; SKYLAKE-NEXT: vpsadbw (%rdi), %ymm0, %ymm0 # sched: [5:1.00]
3668; SKYLAKE-NEXT: retq # sched: [2:1.00]
3669;
3670; ZNVER1-LABEL: test_psadbw:
3671; ZNVER1: # BB#0:
3672; ZNVER1-NEXT: vpsadbw %ymm1, %ymm0, %ymm0 # sched: [4:1.00]
3673; ZNVER1-NEXT: vpsadbw (%rdi), %ymm0, %ymm0 # sched: [11:1.00]
3674; ZNVER1-NEXT: retq # sched: [1:0.50]
3675 %1 = call <4 x i64> @llvm.x86.avx2.psad.bw(<32 x i8> %a0, <32 x i8> %a1)
3676 %2 = bitcast <4 x i64> %1 to <32 x i8>
3677 %3 = load <32 x i8>, <32 x i8> *%a2, align 32
3678 %4 = call <4 x i64> @llvm.x86.avx2.psad.bw(<32 x i8> %2, <32 x i8> %3)
3679 ret <4 x i64> %4
3680}
3681declare <4 x i64> @llvm.x86.avx2.psad.bw(<32 x i8>, <32 x i8>) nounwind readnone
3682
Simon Pilgrim5a931c62017-09-12 11:17:01 +00003683define <32 x i8> @test_pshufb(<32 x i8> %a0, <32 x i8> %a1, <32 x i8> *%a2) {
3684; GENERIC-LABEL: test_pshufb:
3685; GENERIC: # BB#0:
3686; GENERIC-NEXT: vpshufb %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
3687; GENERIC-NEXT: vpshufb (%rdi), %ymm0, %ymm0 # sched: [5:1.00]
3688; GENERIC-NEXT: retq # sched: [1:1.00]
3689;
3690; HASWELL-LABEL: test_pshufb:
3691; HASWELL: # BB#0:
3692; HASWELL-NEXT: vpshufb %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
3693; HASWELL-NEXT: vpshufb (%rdi), %ymm0, %ymm0 # sched: [1:1.00]
3694; HASWELL-NEXT: retq # sched: [2:1.00]
3695;
3696; SKYLAKE-LABEL: test_pshufb:
3697; SKYLAKE: # BB#0:
3698; SKYLAKE-NEXT: vpshufb %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
3699; SKYLAKE-NEXT: vpshufb (%rdi), %ymm0, %ymm0 # sched: [1:1.00]
3700; SKYLAKE-NEXT: retq # sched: [2:1.00]
3701;
3702; ZNVER1-LABEL: test_pshufb:
3703; ZNVER1: # BB#0:
3704; ZNVER1-NEXT: vpshufb %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
3705; ZNVER1-NEXT: vpshufb (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
3706; ZNVER1-NEXT: retq # sched: [1:0.50]
3707 %1 = call <32 x i8> @llvm.x86.avx2.pshuf.b(<32 x i8> %a0, <32 x i8> %a1)
3708 %2 = load <32 x i8>, <32 x i8> *%a2, align 32
3709 %3 = call <32 x i8> @llvm.x86.avx2.pshuf.b(<32 x i8> %1, <32 x i8> %2)
3710 ret <32 x i8> %3
3711}
3712declare <32 x i8> @llvm.x86.avx2.pshuf.b(<32 x i8>, <32 x i8>) nounwind readnone
3713
3714define <8 x i32> @test_pshufd(<8 x i32> %a0, <8 x i32> *%a1) {
3715; GENERIC-LABEL: test_pshufd:
3716; GENERIC: # BB#0:
3717; GENERIC-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[3,2,1,0,7,6,5,4] sched: [1:1.00]
3718; GENERIC-NEXT: vpshufd {{.*#+}} ymm1 = mem[1,0,3,2,5,4,7,6] sched: [5:1.00]
3719; GENERIC-NEXT: vpor %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
3720; GENERIC-NEXT: retq # sched: [1:1.00]
3721;
3722; HASWELL-LABEL: test_pshufd:
3723; HASWELL: # BB#0:
3724; HASWELL-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[3,2,1,0,7,6,5,4] sched: [1:1.00]
3725; HASWELL-NEXT: vpshufd {{.*#+}} ymm1 = mem[1,0,3,2,5,4,7,6] sched: [1:1.00]
3726; HASWELL-NEXT: vpor %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
3727; HASWELL-NEXT: retq # sched: [2:1.00]
3728;
3729; SKYLAKE-LABEL: test_pshufd:
3730; SKYLAKE: # BB#0:
3731; SKYLAKE-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[3,2,1,0,7,6,5,4] sched: [1:1.00]
3732; SKYLAKE-NEXT: vpshufd {{.*#+}} ymm1 = mem[1,0,3,2,5,4,7,6] sched: [1:1.00]
3733; SKYLAKE-NEXT: vpor %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
3734; SKYLAKE-NEXT: retq # sched: [2:1.00]
3735;
3736; ZNVER1-LABEL: test_pshufd:
3737; ZNVER1: # BB#0:
3738; ZNVER1-NEXT: vpshufd {{.*#+}} ymm1 = mem[1,0,3,2,5,4,7,6] sched: [8:0.50]
3739; ZNVER1-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[3,2,1,0,7,6,5,4] sched: [1:0.25]
3740; ZNVER1-NEXT: vpor %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
3741; ZNVER1-NEXT: retq # sched: [1:0.50]
3742 %1 = shufflevector <8 x i32> %a0, <8 x i32> undef, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4>
3743 %2 = load <8 x i32>, <8 x i32> *%a1, align 32
3744 %3 = shufflevector <8 x i32> %2, <8 x i32> undef, <8 x i32> <i32 1, i32 0, i32 3, i32 2, i32 5, i32 4, i32 7, i32 6>
3745 %4 = or <8 x i32> %1, %3
3746 ret <8 x i32> %4
3747}
3748
3749define <16 x i16> @test_pshufhw(<16 x i16> %a0, <16 x i16> *%a1) {
3750; GENERIC-LABEL: test_pshufhw:
3751; GENERIC: # BB#0:
3752; GENERIC-NEXT: vpshufhw {{.*#+}} ymm0 = ymm0[0,1,2,3,7,6,5,4,8,9,10,11,15,14,13,12] sched: [1:1.00]
3753; GENERIC-NEXT: vpshufhw {{.*#+}} ymm1 = mem[0,1,2,3,5,4,7,6,8,9,10,11,13,12,15,14] sched: [5:1.00]
3754; GENERIC-NEXT: vpor %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
3755; GENERIC-NEXT: retq # sched: [1:1.00]
3756;
3757; HASWELL-LABEL: test_pshufhw:
3758; HASWELL: # BB#0:
3759; HASWELL-NEXT: vpshufhw {{.*#+}} ymm0 = ymm0[0,1,2,3,7,6,5,4,8,9,10,11,15,14,13,12] sched: [1:1.00]
3760; HASWELL-NEXT: vpshufhw {{.*#+}} ymm1 = mem[0,1,2,3,5,4,7,6,8,9,10,11,13,12,15,14] sched: [1:1.00]
3761; HASWELL-NEXT: vpor %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
3762; HASWELL-NEXT: retq # sched: [2:1.00]
3763;
3764; SKYLAKE-LABEL: test_pshufhw:
3765; SKYLAKE: # BB#0:
3766; SKYLAKE-NEXT: vpshufhw {{.*#+}} ymm0 = ymm0[0,1,2,3,7,6,5,4,8,9,10,11,15,14,13,12] sched: [1:1.00]
3767; SKYLAKE-NEXT: vpshufhw {{.*#+}} ymm1 = mem[0,1,2,3,5,4,7,6,8,9,10,11,13,12,15,14] sched: [1:1.00]
3768; SKYLAKE-NEXT: vpor %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
3769; SKYLAKE-NEXT: retq # sched: [2:1.00]
3770;
3771; ZNVER1-LABEL: test_pshufhw:
3772; ZNVER1: # BB#0:
3773; ZNVER1-NEXT: vpshufhw {{.*#+}} ymm1 = mem[0,1,2,3,5,4,7,6,8,9,10,11,13,12,15,14] sched: [8:0.50]
3774; ZNVER1-NEXT: vpshufhw {{.*#+}} ymm0 = ymm0[0,1,2,3,7,6,5,4,8,9,10,11,15,14,13,12] sched: [1:0.25]
3775; ZNVER1-NEXT: vpor %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
3776; ZNVER1-NEXT: retq # sched: [1:0.50]
3777 %1 = shufflevector <16 x i16> %a0, <16 x i16> undef, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 7, i32 6, i32 5, i32 4, i32 8, i32 9, i32 10, i32 11, i32 15, i32 14, i32 13, i32 12>
3778 %2 = load <16 x i16>, <16 x i16> *%a1, align 32
3779 %3 = shufflevector <16 x i16> %2, <16 x i16> undef, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 5, i32 4, i32 7, i32 6, i32 8, i32 9, i32 10, i32 11, i32 13, i32 12, i32 15, i32 14>
3780 %4 = or <16 x i16> %1, %3
3781 ret <16 x i16> %4
3782}
3783
3784define <16 x i16> @test_pshuflw(<16 x i16> %a0, <16 x i16> *%a1) {
3785; GENERIC-LABEL: test_pshuflw:
3786; GENERIC: # BB#0:
3787; GENERIC-NEXT: vpshuflw {{.*#+}} ymm0 = ymm0[3,2,1,0,4,5,6,7,11,10,9,8,12,13,14,15] sched: [1:1.00]
3788; GENERIC-NEXT: vpshuflw {{.*#+}} ymm1 = mem[1,0,3,2,4,5,6,7,9,8,11,10,12,13,14,15] sched: [5:1.00]
3789; GENERIC-NEXT: vpor %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
3790; GENERIC-NEXT: retq # sched: [1:1.00]
3791;
3792; HASWELL-LABEL: test_pshuflw:
3793; HASWELL: # BB#0:
3794; HASWELL-NEXT: vpshuflw {{.*#+}} ymm0 = ymm0[3,2,1,0,4,5,6,7,11,10,9,8,12,13,14,15] sched: [1:1.00]
3795; HASWELL-NEXT: vpshuflw {{.*#+}} ymm1 = mem[1,0,3,2,4,5,6,7,9,8,11,10,12,13,14,15] sched: [1:1.00]
3796; HASWELL-NEXT: vpor %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
3797; HASWELL-NEXT: retq # sched: [2:1.00]
3798;
3799; SKYLAKE-LABEL: test_pshuflw:
3800; SKYLAKE: # BB#0:
3801; SKYLAKE-NEXT: vpshuflw {{.*#+}} ymm0 = ymm0[3,2,1,0,4,5,6,7,11,10,9,8,12,13,14,15] sched: [1:1.00]
3802; SKYLAKE-NEXT: vpshuflw {{.*#+}} ymm1 = mem[1,0,3,2,4,5,6,7,9,8,11,10,12,13,14,15] sched: [1:1.00]
3803; SKYLAKE-NEXT: vpor %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
3804; SKYLAKE-NEXT: retq # sched: [2:1.00]
3805;
3806; ZNVER1-LABEL: test_pshuflw:
3807; ZNVER1: # BB#0:
3808; ZNVER1-NEXT: vpshuflw {{.*#+}} ymm1 = mem[1,0,3,2,4,5,6,7,9,8,11,10,12,13,14,15] sched: [8:0.50]
3809; ZNVER1-NEXT: vpshuflw {{.*#+}} ymm0 = ymm0[3,2,1,0,4,5,6,7,11,10,9,8,12,13,14,15] sched: [1:0.25]
3810; ZNVER1-NEXT: vpor %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
3811; ZNVER1-NEXT: retq # sched: [1:0.50]
3812 %1 = shufflevector <16 x i16> %a0, <16 x i16> undef, <16 x i32> <i32 3, i32 2, i32 1, i32 0, i32 4, i32 5, i32 6, i32 7, i32 11, i32 10, i32 9, i32 8, i32 12, i32 13, i32 14, i32 15>
3813 %2 = load <16 x i16>, <16 x i16> *%a1, align 32
3814 %3 = shufflevector <16 x i16> %2, <16 x i16> undef, <16 x i32> <i32 1, i32 0, i32 3, i32 2, i32 4, i32 5, i32 6, i32 7, i32 9, i32 8, i32 11, i32 10, i32 12, i32 13, i32 14, i32 15>
3815 %4 = or <16 x i16> %1, %3
3816 ret <16 x i16> %4
3817}
3818
Simon Pilgrim0af5a7722017-09-12 15:01:20 +00003819define <32 x i8> @test_psignb(<32 x i8> %a0, <32 x i8> %a1, <32 x i8> *%a2) {
3820; GENERIC-LABEL: test_psignb:
3821; GENERIC: # BB#0:
3822; GENERIC-NEXT: vpsignb %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
3823; GENERIC-NEXT: vpsignb (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
3824; GENERIC-NEXT: retq # sched: [1:1.00]
3825;
3826; HASWELL-LABEL: test_psignb:
3827; HASWELL: # BB#0:
3828; HASWELL-NEXT: vpsignb %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
3829; HASWELL-NEXT: vpsignb (%rdi), %ymm0, %ymm0 # sched: [1:0.50]
3830; HASWELL-NEXT: retq # sched: [2:1.00]
3831;
3832; SKYLAKE-LABEL: test_psignb:
3833; SKYLAKE: # BB#0:
3834; SKYLAKE-NEXT: vpsignb %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
3835; SKYLAKE-NEXT: vpsignb (%rdi), %ymm0, %ymm0 # sched: [1:0.50]
3836; SKYLAKE-NEXT: retq # sched: [2:1.00]
3837;
3838; ZNVER1-LABEL: test_psignb:
3839; ZNVER1: # BB#0:
3840; ZNVER1-NEXT: vpsignb %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
3841; ZNVER1-NEXT: vpsignb (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
3842; ZNVER1-NEXT: retq # sched: [1:0.50]
3843 %1 = call <32 x i8> @llvm.x86.avx2.psign.b(<32 x i8> %a0, <32 x i8> %a1)
3844 %2 = load <32 x i8>, <32 x i8> *%a2, align 32
3845 %3 = call <32 x i8> @llvm.x86.avx2.psign.b(<32 x i8> %1, <32 x i8> %2)
3846 ret <32 x i8> %3
3847}
3848declare <32 x i8> @llvm.x86.avx2.psign.b(<32 x i8>, <32 x i8>) nounwind readnone
3849
3850define <8 x i32> @test_psignd(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> *%a2) {
3851; GENERIC-LABEL: test_psignd:
3852; GENERIC: # BB#0:
3853; GENERIC-NEXT: vpsignd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
3854; GENERIC-NEXT: vpsignd (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
3855; GENERIC-NEXT: retq # sched: [1:1.00]
3856;
3857; HASWELL-LABEL: test_psignd:
3858; HASWELL: # BB#0:
3859; HASWELL-NEXT: vpsignd %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
3860; HASWELL-NEXT: vpsignd (%rdi), %ymm0, %ymm0 # sched: [1:0.50]
3861; HASWELL-NEXT: retq # sched: [2:1.00]
3862;
3863; SKYLAKE-LABEL: test_psignd:
3864; SKYLAKE: # BB#0:
3865; SKYLAKE-NEXT: vpsignd %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
3866; SKYLAKE-NEXT: vpsignd (%rdi), %ymm0, %ymm0 # sched: [1:0.50]
3867; SKYLAKE-NEXT: retq # sched: [2:1.00]
3868;
3869; ZNVER1-LABEL: test_psignd:
3870; ZNVER1: # BB#0:
3871; ZNVER1-NEXT: vpsignd %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
3872; ZNVER1-NEXT: vpsignd (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
3873; ZNVER1-NEXT: retq # sched: [1:0.50]
3874 %1 = call <8 x i32> @llvm.x86.avx2.psign.d(<8 x i32> %a0, <8 x i32> %a1)
3875 %2 = load <8 x i32>, <8 x i32> *%a2, align 32
3876 %3 = call <8 x i32> @llvm.x86.avx2.psign.d(<8 x i32> %1, <8 x i32> %2)
3877 ret <8 x i32> %3
3878}
3879declare <8 x i32> @llvm.x86.avx2.psign.d(<8 x i32>, <8 x i32>) nounwind readnone
3880
3881define <16 x i16> @test_psignw(<16 x i16> %a0, <16 x i16> %a1, <16 x i16> *%a2) {
3882; GENERIC-LABEL: test_psignw:
3883; GENERIC: # BB#0:
3884; GENERIC-NEXT: vpsignw %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
3885; GENERIC-NEXT: vpsignw (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
3886; GENERIC-NEXT: retq # sched: [1:1.00]
3887;
3888; HASWELL-LABEL: test_psignw:
3889; HASWELL: # BB#0:
3890; HASWELL-NEXT: vpsignw %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
3891; HASWELL-NEXT: vpsignw (%rdi), %ymm0, %ymm0 # sched: [1:0.50]
3892; HASWELL-NEXT: retq # sched: [2:1.00]
3893;
3894; SKYLAKE-LABEL: test_psignw:
3895; SKYLAKE: # BB#0:
3896; SKYLAKE-NEXT: vpsignw %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
3897; SKYLAKE-NEXT: vpsignw (%rdi), %ymm0, %ymm0 # sched: [1:0.50]
3898; SKYLAKE-NEXT: retq # sched: [2:1.00]
3899;
3900; ZNVER1-LABEL: test_psignw:
3901; ZNVER1: # BB#0:
3902; ZNVER1-NEXT: vpsignw %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
3903; ZNVER1-NEXT: vpsignw (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
3904; ZNVER1-NEXT: retq # sched: [1:0.50]
3905 %1 = call <16 x i16> @llvm.x86.avx2.psign.w(<16 x i16> %a0, <16 x i16> %a1)
3906 %2 = load <16 x i16>, <16 x i16> *%a2, align 32
3907 %3 = call <16 x i16> @llvm.x86.avx2.psign.w(<16 x i16> %1, <16 x i16> %2)
3908 ret <16 x i16> %3
3909}
3910declare <16 x i16> @llvm.x86.avx2.psign.w(<16 x i16>, <16 x i16>) nounwind readnone
3911
3912define <8 x i32> @test_pslld(<8 x i32> %a0, <4 x i32> %a1, <4 x i32> *%a2) {
3913; GENERIC-LABEL: test_pslld:
3914; GENERIC: # BB#0:
3915; GENERIC-NEXT: vpslld %xmm1, %ymm0, %ymm0 # sched: [1:1.00]
3916; GENERIC-NEXT: vpslld (%rdi), %ymm0, %ymm0 # sched: [5:1.00]
3917; GENERIC-NEXT: vpslld $2, %ymm0, %ymm0 # sched: [1:1.00]
3918; GENERIC-NEXT: retq # sched: [1:1.00]
3919;
3920; HASWELL-LABEL: test_pslld:
3921; HASWELL: # BB#0:
3922; HASWELL-NEXT: vpslld %xmm1, %ymm0, %ymm0 # sched: [4:1.00]
3923; HASWELL-NEXT: vpslld (%rdi), %ymm0, %ymm0 # sched: [1:1.00]
3924; HASWELL-NEXT: vpslld $2, %ymm0, %ymm0 # sched: [1:1.00]
3925; HASWELL-NEXT: retq # sched: [2:1.00]
3926;
3927; SKYLAKE-LABEL: test_pslld:
3928; SKYLAKE: # BB#0:
3929; SKYLAKE-NEXT: vpslld %xmm1, %ymm0, %ymm0 # sched: [4:1.00]
3930; SKYLAKE-NEXT: vpslld (%rdi), %ymm0, %ymm0 # sched: [1:1.00]
3931; SKYLAKE-NEXT: vpslld $2, %ymm0, %ymm0 # sched: [1:1.00]
3932; SKYLAKE-NEXT: retq # sched: [2:1.00]
3933;
3934; ZNVER1-LABEL: test_pslld:
3935; ZNVER1: # BB#0:
3936; ZNVER1-NEXT: vpslld %xmm1, %ymm0, %ymm0 # sched: [2:1.00]
3937; ZNVER1-NEXT: vpslld (%rdi), %ymm0, %ymm0 # sched: [9:1.00]
3938; ZNVER1-NEXT: vpslld $2, %ymm0, %ymm0 # sched: [1:0.25]
3939; ZNVER1-NEXT: retq # sched: [1:0.50]
3940 %1 = call <8 x i32> @llvm.x86.avx2.psll.d(<8 x i32> %a0, <4 x i32> %a1)
3941 %2 = load <4 x i32>, <4 x i32> *%a2, align 16
3942 %3 = call <8 x i32> @llvm.x86.avx2.psll.d(<8 x i32> %1, <4 x i32> %2)
3943 %4 = shl <8 x i32> %3, <i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2>
3944 ret <8 x i32> %4
3945}
3946declare <8 x i32> @llvm.x86.avx2.psll.d(<8 x i32>, <4 x i32>) nounwind readnone
3947
Simon Pilgrim76418aa2017-09-12 15:52:01 +00003948define <32 x i8> @test_pslldq(<32 x i8> %a0) {
3949; GENERIC-LABEL: test_pslldq:
3950; GENERIC: # BB#0:
3951; GENERIC-NEXT: vpslldq {{.*#+}} ymm0 = zero,zero,zero,ymm0[0,1,2,3,4,5,6,7,8,9,10,11,12],zero,zero,zero,ymm0[16,17,18,19,20,21,22,23,24,25,26,27,28] sched: [1:1.00]
3952; GENERIC-NEXT: retq # sched: [1:1.00]
3953;
3954; HASWELL-LABEL: test_pslldq:
3955; HASWELL: # BB#0:
3956; HASWELL-NEXT: vpslldq {{.*#+}} ymm0 = zero,zero,zero,ymm0[0,1,2,3,4,5,6,7,8,9,10,11,12],zero,zero,zero,ymm0[16,17,18,19,20,21,22,23,24,25,26,27,28] sched: [1:1.00]
3957; HASWELL-NEXT: retq # sched: [2:1.00]
3958;
3959; SKYLAKE-LABEL: test_pslldq:
3960; SKYLAKE: # BB#0:
3961; SKYLAKE-NEXT: vpslldq {{.*#+}} ymm0 = zero,zero,zero,ymm0[0,1,2,3,4,5,6,7,8,9,10,11,12],zero,zero,zero,ymm0[16,17,18,19,20,21,22,23,24,25,26,27,28] sched: [1:1.00]
3962; SKYLAKE-NEXT: retq # sched: [2:1.00]
3963;
3964; ZNVER1-LABEL: test_pslldq:
3965; ZNVER1: # BB#0:
3966; ZNVER1-NEXT: vpslldq {{.*#+}} ymm0 = zero,zero,zero,ymm0[0,1,2,3,4,5,6,7,8,9,10,11,12],zero,zero,zero,ymm0[16,17,18,19,20,21,22,23,24,25,26,27,28] sched: [2:1.00]
3967; ZNVER1-NEXT: retq # sched: [1:0.50]
3968 %1 = shufflevector <32 x i8> zeroinitializer, <32 x i8> %a0, <32 x i32> <i32 13, i32 14, i32 15, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 29, i32 30, i32 31, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60>
3969 ret <32 x i8> %1
3970}
3971
; Scheduler-model test: VPSLLQ ymm by xmm count, by folded-load count, and by immediate.
define <4 x i64> @test_psllq(<4 x i64> %a0, <2 x i64> %a1, <2 x i64> *%a2) {
; GENERIC-LABEL: test_psllq:
; GENERIC: # BB#0:
; GENERIC-NEXT: vpsllq %xmm1, %ymm0, %ymm0 # sched: [1:1.00]
; GENERIC-NEXT: vpsllq (%rdi), %ymm0, %ymm0 # sched: [5:1.00]
; GENERIC-NEXT: vpsllq $2, %ymm0, %ymm0 # sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_psllq:
; HASWELL: # BB#0:
; HASWELL-NEXT: vpsllq %xmm1, %ymm0, %ymm0 # sched: [4:1.00]
; HASWELL-NEXT: vpsllq (%rdi), %ymm0, %ymm0 # sched: [1:1.00]
; HASWELL-NEXT: vpsllq $2, %ymm0, %ymm0 # sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; SKYLAKE-LABEL: test_psllq:
; SKYLAKE: # BB#0:
; SKYLAKE-NEXT: vpsllq %xmm1, %ymm0, %ymm0 # sched: [4:1.00]
; SKYLAKE-NEXT: vpsllq (%rdi), %ymm0, %ymm0 # sched: [1:1.00]
; SKYLAKE-NEXT: vpsllq $2, %ymm0, %ymm0 # sched: [1:1.00]
; SKYLAKE-NEXT: retq # sched: [2:1.00]
;
; ZNVER1-LABEL: test_psllq:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vpsllq %xmm1, %ymm0, %ymm0 # sched: [2:1.00]
; ZNVER1-NEXT: vpsllq (%rdi), %ymm0, %ymm0 # sched: [9:1.00]
; ZNVER1-NEXT: vpsllq $2, %ymm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT: retq # sched: [1:0.50]
  %1 = call <4 x i64> @llvm.x86.avx2.psll.q(<4 x i64> %a0, <2 x i64> %a1)
  %2 = load <2 x i64>, <2 x i64> *%a2, align 16
  %3 = call <4 x i64> @llvm.x86.avx2.psll.q(<4 x i64> %1, <2 x i64> %2)
  %4 = shl <4 x i64> %3, <i64 2, i64 2, i64 2, i64 2>
  ret <4 x i64> %4
}
declare <4 x i64> @llvm.x86.avx2.psll.q(<4 x i64>, <2 x i64>) nounwind readnone
4007
; Scheduler-model test: VPSLLVD xmm (per-element variable shift), register and folded-load forms.
define <4 x i32> @test_psllvd(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> *%a2) {
; GENERIC-LABEL: test_psllvd:
; GENERIC: # BB#0:
; GENERIC-NEXT: vpsllvd %xmm1, %xmm0, %xmm0 # sched: [1:1.00]
; GENERIC-NEXT: vpsllvd (%rdi), %xmm0, %xmm0 # sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_psllvd:
; HASWELL: # BB#0:
; HASWELL-NEXT: vpsllvd %xmm1, %xmm0, %xmm0 # sched: [3:2.00]
; HASWELL-NEXT: vpsllvd (%rdi), %xmm0, %xmm0 # sched: [3:2.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; SKYLAKE-LABEL: test_psllvd:
; SKYLAKE: # BB#0:
; SKYLAKE-NEXT: vpsllvd %xmm1, %xmm0, %xmm0 # sched: [3:2.00]
; SKYLAKE-NEXT: vpsllvd (%rdi), %xmm0, %xmm0 # sched: [3:2.00]
; SKYLAKE-NEXT: retq # sched: [2:1.00]
;
; ZNVER1-LABEL: test_psllvd:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vpsllvd %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; ZNVER1-NEXT: vpsllvd (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
  %1 = call <4 x i32> @llvm.x86.avx2.psllv.d(<4 x i32> %a0, <4 x i32> %a1)
  %2 = load <4 x i32>, <4 x i32> *%a2, align 16
  %3 = call <4 x i32> @llvm.x86.avx2.psllv.d(<4 x i32> %1, <4 x i32> %2)
  ret <4 x i32> %3
}
declare <4 x i32> @llvm.x86.avx2.psllv.d(<4 x i32>, <4 x i32>) nounwind readnone
4038
; Scheduler-model test: VPSLLVD ymm (per-element variable shift), register and folded-load forms.
define <8 x i32> @test_psllvd_ymm(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> *%a2) {
; GENERIC-LABEL: test_psllvd_ymm:
; GENERIC: # BB#0:
; GENERIC-NEXT: vpsllvd %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
; GENERIC-NEXT: vpsllvd (%rdi), %ymm0, %ymm0 # sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_psllvd_ymm:
; HASWELL: # BB#0:
; HASWELL-NEXT: vpsllvd %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
; HASWELL-NEXT: vpsllvd (%rdi), %ymm0, %ymm0 # sched: [3:2.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; SKYLAKE-LABEL: test_psllvd_ymm:
; SKYLAKE: # BB#0:
; SKYLAKE-NEXT: vpsllvd %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
; SKYLAKE-NEXT: vpsllvd (%rdi), %ymm0, %ymm0 # sched: [3:2.00]
; SKYLAKE-NEXT: retq # sched: [2:1.00]
;
; ZNVER1-LABEL: test_psllvd_ymm:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vpsllvd %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; ZNVER1-NEXT: vpsllvd (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
  %1 = call <8 x i32> @llvm.x86.avx2.psllv.d.256(<8 x i32> %a0, <8 x i32> %a1)
  %2 = load <8 x i32>, <8 x i32> *%a2, align 32
  %3 = call <8 x i32> @llvm.x86.avx2.psllv.d.256(<8 x i32> %1, <8 x i32> %2)
  ret <8 x i32> %3
}
declare <8 x i32> @llvm.x86.avx2.psllv.d.256(<8 x i32>, <8 x i32>) nounwind readnone
4069
; Scheduler-model test: VPSLLVQ xmm (per-element variable shift), register and folded-load forms.
define <2 x i64> @test_psllvq(<2 x i64> %a0, <2 x i64> %a1, <2 x i64> *%a2) {
; GENERIC-LABEL: test_psllvq:
; GENERIC: # BB#0:
; GENERIC-NEXT: vpsllvq %xmm1, %xmm0, %xmm0 # sched: [1:1.00]
; GENERIC-NEXT: vpsllvq (%rdi), %xmm0, %xmm0 # sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_psllvq:
; HASWELL: # BB#0:
; HASWELL-NEXT: vpsllvq %xmm1, %xmm0, %xmm0 # sched: [1:1.00]
; HASWELL-NEXT: vpsllvq (%rdi), %xmm0, %xmm0 # sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; SKYLAKE-LABEL: test_psllvq:
; SKYLAKE: # BB#0:
; SKYLAKE-NEXT: vpsllvq %xmm1, %xmm0, %xmm0 # sched: [1:1.00]
; SKYLAKE-NEXT: vpsllvq (%rdi), %xmm0, %xmm0 # sched: [1:1.00]
; SKYLAKE-NEXT: retq # sched: [2:1.00]
;
; ZNVER1-LABEL: test_psllvq:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vpsllvq %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; ZNVER1-NEXT: vpsllvq (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
  %1 = call <2 x i64> @llvm.x86.avx2.psllv.q(<2 x i64> %a0, <2 x i64> %a1)
  %2 = load <2 x i64>, <2 x i64> *%a2, align 16
  %3 = call <2 x i64> @llvm.x86.avx2.psllv.q(<2 x i64> %1, <2 x i64> %2)
  ret <2 x i64> %3
}
declare <2 x i64> @llvm.x86.avx2.psllv.q(<2 x i64>, <2 x i64>) nounwind readnone
4100
; Scheduler-model test: VPSLLVQ ymm (per-element variable shift), register and folded-load forms.
define <4 x i64> @test_psllvq_ymm(<4 x i64> %a0, <4 x i64> %a1, <4 x i64> *%a2) {
; GENERIC-LABEL: test_psllvq_ymm:
; GENERIC: # BB#0:
; GENERIC-NEXT: vpsllvq %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
; GENERIC-NEXT: vpsllvq (%rdi), %ymm0, %ymm0 # sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_psllvq_ymm:
; HASWELL: # BB#0:
; HASWELL-NEXT: vpsllvq %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
; HASWELL-NEXT: vpsllvq (%rdi), %ymm0, %ymm0 # sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; SKYLAKE-LABEL: test_psllvq_ymm:
; SKYLAKE: # BB#0:
; SKYLAKE-NEXT: vpsllvq %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
; SKYLAKE-NEXT: vpsllvq (%rdi), %ymm0, %ymm0 # sched: [1:1.00]
; SKYLAKE-NEXT: retq # sched: [2:1.00]
;
; ZNVER1-LABEL: test_psllvq_ymm:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vpsllvq %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; ZNVER1-NEXT: vpsllvq (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
  %1 = call <4 x i64> @llvm.x86.avx2.psllv.q.256(<4 x i64> %a0, <4 x i64> %a1)
  %2 = load <4 x i64>, <4 x i64> *%a2, align 32
  %3 = call <4 x i64> @llvm.x86.avx2.psllv.q.256(<4 x i64> %1, <4 x i64> %2)
  ret <4 x i64> %3
}
declare <4 x i64> @llvm.x86.avx2.psllv.q.256(<4 x i64>, <4 x i64>) nounwind readnone
4131
; Scheduler-model test: VPSLLW ymm by xmm count, by folded-load count, and by immediate.
define <16 x i16> @test_psllw(<16 x i16> %a0, <8 x i16> %a1, <8 x i16> *%a2) {
; GENERIC-LABEL: test_psllw:
; GENERIC: # BB#0:
; GENERIC-NEXT: vpsllw %xmm1, %ymm0, %ymm0 # sched: [1:1.00]
; GENERIC-NEXT: vpsllw (%rdi), %ymm0, %ymm0 # sched: [5:1.00]
; GENERIC-NEXT: vpsllw $2, %ymm0, %ymm0 # sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_psllw:
; HASWELL: # BB#0:
; HASWELL-NEXT: vpsllw %xmm1, %ymm0, %ymm0 # sched: [4:1.00]
; HASWELL-NEXT: vpsllw (%rdi), %ymm0, %ymm0 # sched: [1:1.00]
; HASWELL-NEXT: vpsllw $2, %ymm0, %ymm0 # sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; SKYLAKE-LABEL: test_psllw:
; SKYLAKE: # BB#0:
; SKYLAKE-NEXT: vpsllw %xmm1, %ymm0, %ymm0 # sched: [4:1.00]
; SKYLAKE-NEXT: vpsllw (%rdi), %ymm0, %ymm0 # sched: [1:1.00]
; SKYLAKE-NEXT: vpsllw $2, %ymm0, %ymm0 # sched: [1:1.00]
; SKYLAKE-NEXT: retq # sched: [2:1.00]
;
; ZNVER1-LABEL: test_psllw:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vpsllw %xmm1, %ymm0, %ymm0 # sched: [2:1.00]
; ZNVER1-NEXT: vpsllw (%rdi), %ymm0, %ymm0 # sched: [9:1.00]
; ZNVER1-NEXT: vpsllw $2, %ymm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT: retq # sched: [1:0.50]
  %1 = call <16 x i16> @llvm.x86.avx2.psll.w(<16 x i16> %a0, <8 x i16> %a1)
  %2 = load <8 x i16>, <8 x i16> *%a2, align 16
  %3 = call <16 x i16> @llvm.x86.avx2.psll.w(<16 x i16> %1, <8 x i16> %2)
  %4 = shl <16 x i16> %3, <i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2>
  ret <16 x i16> %4
}
declare <16 x i16> @llvm.x86.avx2.psll.w(<16 x i16>, <8 x i16>) nounwind readnone
4167
; Scheduler-model test: VPSRAD ymm (arithmetic shift) by xmm count, folded-load count, and immediate.
define <8 x i32> @test_psrad(<8 x i32> %a0, <4 x i32> %a1, <4 x i32> *%a2) {
; GENERIC-LABEL: test_psrad:
; GENERIC: # BB#0:
; GENERIC-NEXT: vpsrad %xmm1, %ymm0, %ymm0 # sched: [1:1.00]
; GENERIC-NEXT: vpsrad (%rdi), %ymm0, %ymm0 # sched: [5:1.00]
; GENERIC-NEXT: vpsrad $2, %ymm0, %ymm0 # sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_psrad:
; HASWELL: # BB#0:
; HASWELL-NEXT: vpsrad %xmm1, %ymm0, %ymm0 # sched: [4:1.00]
; HASWELL-NEXT: vpsrad (%rdi), %ymm0, %ymm0 # sched: [1:1.00]
; HASWELL-NEXT: vpsrad $2, %ymm0, %ymm0 # sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; SKYLAKE-LABEL: test_psrad:
; SKYLAKE: # BB#0:
; SKYLAKE-NEXT: vpsrad %xmm1, %ymm0, %ymm0 # sched: [4:1.00]
; SKYLAKE-NEXT: vpsrad (%rdi), %ymm0, %ymm0 # sched: [1:1.00]
; SKYLAKE-NEXT: vpsrad $2, %ymm0, %ymm0 # sched: [1:1.00]
; SKYLAKE-NEXT: retq # sched: [2:1.00]
;
; ZNVER1-LABEL: test_psrad:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vpsrad %xmm1, %ymm0, %ymm0 # sched: [2:1.00]
; ZNVER1-NEXT: vpsrad (%rdi), %ymm0, %ymm0 # sched: [9:1.00]
; ZNVER1-NEXT: vpsrad $2, %ymm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT: retq # sched: [1:0.50]
  %1 = call <8 x i32> @llvm.x86.avx2.psra.d(<8 x i32> %a0, <4 x i32> %a1)
  %2 = load <4 x i32>, <4 x i32> *%a2, align 16
  %3 = call <8 x i32> @llvm.x86.avx2.psra.d(<8 x i32> %1, <4 x i32> %2)
  %4 = ashr <8 x i32> %3, <i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2>
  ret <8 x i32> %4
}
declare <8 x i32> @llvm.x86.avx2.psra.d(<8 x i32>, <4 x i32>) nounwind readnone
4203
; Scheduler-model test: VPSRAVD xmm (per-element arithmetic shift), register and folded-load forms.
define <4 x i32> @test_psravd(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> *%a2) {
; GENERIC-LABEL: test_psravd:
; GENERIC: # BB#0:
; GENERIC-NEXT: vpsravd %xmm1, %xmm0, %xmm0 # sched: [1:1.00]
; GENERIC-NEXT: vpsravd (%rdi), %xmm0, %xmm0 # sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_psravd:
; HASWELL: # BB#0:
; HASWELL-NEXT: vpsravd %xmm1, %xmm0, %xmm0 # sched: [3:2.00]
; HASWELL-NEXT: vpsravd (%rdi), %xmm0, %xmm0 # sched: [3:2.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; SKYLAKE-LABEL: test_psravd:
; SKYLAKE: # BB#0:
; SKYLAKE-NEXT: vpsravd %xmm1, %xmm0, %xmm0 # sched: [3:2.00]
; SKYLAKE-NEXT: vpsravd (%rdi), %xmm0, %xmm0 # sched: [3:2.00]
; SKYLAKE-NEXT: retq # sched: [2:1.00]
;
; ZNVER1-LABEL: test_psravd:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vpsravd %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; ZNVER1-NEXT: vpsravd (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
  %1 = call <4 x i32> @llvm.x86.avx2.psrav.d(<4 x i32> %a0, <4 x i32> %a1)
  %2 = load <4 x i32>, <4 x i32> *%a2, align 16
  %3 = call <4 x i32> @llvm.x86.avx2.psrav.d(<4 x i32> %1, <4 x i32> %2)
  ret <4 x i32> %3
}
declare <4 x i32> @llvm.x86.avx2.psrav.d(<4 x i32>, <4 x i32>) nounwind readnone
4234
; Scheduler-model test: VPSRAVD ymm (per-element arithmetic shift), register and folded-load forms.
define <8 x i32> @test_psravd_ymm(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> *%a2) {
; GENERIC-LABEL: test_psravd_ymm:
; GENERIC: # BB#0:
; GENERIC-NEXT: vpsravd %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
; GENERIC-NEXT: vpsravd (%rdi), %ymm0, %ymm0 # sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_psravd_ymm:
; HASWELL: # BB#0:
; HASWELL-NEXT: vpsravd %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
; HASWELL-NEXT: vpsravd (%rdi), %ymm0, %ymm0 # sched: [3:2.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; SKYLAKE-LABEL: test_psravd_ymm:
; SKYLAKE: # BB#0:
; SKYLAKE-NEXT: vpsravd %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
; SKYLAKE-NEXT: vpsravd (%rdi), %ymm0, %ymm0 # sched: [3:2.00]
; SKYLAKE-NEXT: retq # sched: [2:1.00]
;
; ZNVER1-LABEL: test_psravd_ymm:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vpsravd %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; ZNVER1-NEXT: vpsravd (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
  %1 = call <8 x i32> @llvm.x86.avx2.psrav.d.256(<8 x i32> %a0, <8 x i32> %a1)
  %2 = load <8 x i32>, <8 x i32> *%a2, align 32
  %3 = call <8 x i32> @llvm.x86.avx2.psrav.d.256(<8 x i32> %1, <8 x i32> %2)
  ret <8 x i32> %3
}
declare <8 x i32> @llvm.x86.avx2.psrav.d.256(<8 x i32>, <8 x i32>) nounwind readnone
4265
; Scheduler-model test: VPSRAW ymm (arithmetic shift) by xmm count, folded-load count, and immediate.
define <16 x i16> @test_psraw(<16 x i16> %a0, <8 x i16> %a1, <8 x i16> *%a2) {
; GENERIC-LABEL: test_psraw:
; GENERIC: # BB#0:
; GENERIC-NEXT: vpsraw %xmm1, %ymm0, %ymm0 # sched: [1:1.00]
; GENERIC-NEXT: vpsraw (%rdi), %ymm0, %ymm0 # sched: [5:1.00]
; GENERIC-NEXT: vpsraw $2, %ymm0, %ymm0 # sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_psraw:
; HASWELL: # BB#0:
; HASWELL-NEXT: vpsraw %xmm1, %ymm0, %ymm0 # sched: [4:1.00]
; HASWELL-NEXT: vpsraw (%rdi), %ymm0, %ymm0 # sched: [1:1.00]
; HASWELL-NEXT: vpsraw $2, %ymm0, %ymm0 # sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; SKYLAKE-LABEL: test_psraw:
; SKYLAKE: # BB#0:
; SKYLAKE-NEXT: vpsraw %xmm1, %ymm0, %ymm0 # sched: [4:1.00]
; SKYLAKE-NEXT: vpsraw (%rdi), %ymm0, %ymm0 # sched: [1:1.00]
; SKYLAKE-NEXT: vpsraw $2, %ymm0, %ymm0 # sched: [1:1.00]
; SKYLAKE-NEXT: retq # sched: [2:1.00]
;
; ZNVER1-LABEL: test_psraw:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vpsraw %xmm1, %ymm0, %ymm0 # sched: [2:1.00]
; ZNVER1-NEXT: vpsraw (%rdi), %ymm0, %ymm0 # sched: [9:1.00]
; ZNVER1-NEXT: vpsraw $2, %ymm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT: retq # sched: [1:0.50]
  %1 = call <16 x i16> @llvm.x86.avx2.psra.w(<16 x i16> %a0, <8 x i16> %a1)
  %2 = load <8 x i16>, <8 x i16> *%a2, align 16
  %3 = call <16 x i16> @llvm.x86.avx2.psra.w(<16 x i16> %1, <8 x i16> %2)
  %4 = ashr <16 x i16> %3, <i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2>
  ret <16 x i16> %4
}
declare <16 x i16> @llvm.x86.avx2.psra.w(<16 x i16>, <8 x i16>) nounwind readnone
4301
; Scheduler-model test: VPSRLD ymm (logical shift) by xmm count, folded-load count, and immediate.
define <8 x i32> @test_psrld(<8 x i32> %a0, <4 x i32> %a1, <4 x i32> *%a2) {
; GENERIC-LABEL: test_psrld:
; GENERIC: # BB#0:
; GENERIC-NEXT: vpsrld %xmm1, %ymm0, %ymm0 # sched: [1:1.00]
; GENERIC-NEXT: vpsrld (%rdi), %ymm0, %ymm0 # sched: [5:1.00]
; GENERIC-NEXT: vpsrld $2, %ymm0, %ymm0 # sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_psrld:
; HASWELL: # BB#0:
; HASWELL-NEXT: vpsrld %xmm1, %ymm0, %ymm0 # sched: [4:1.00]
; HASWELL-NEXT: vpsrld (%rdi), %ymm0, %ymm0 # sched: [1:1.00]
; HASWELL-NEXT: vpsrld $2, %ymm0, %ymm0 # sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; SKYLAKE-LABEL: test_psrld:
; SKYLAKE: # BB#0:
; SKYLAKE-NEXT: vpsrld %xmm1, %ymm0, %ymm0 # sched: [4:1.00]
; SKYLAKE-NEXT: vpsrld (%rdi), %ymm0, %ymm0 # sched: [1:1.00]
; SKYLAKE-NEXT: vpsrld $2, %ymm0, %ymm0 # sched: [1:1.00]
; SKYLAKE-NEXT: retq # sched: [2:1.00]
;
; ZNVER1-LABEL: test_psrld:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vpsrld %xmm1, %ymm0, %ymm0 # sched: [2:1.00]
; ZNVER1-NEXT: vpsrld (%rdi), %ymm0, %ymm0 # sched: [9:1.00]
; ZNVER1-NEXT: vpsrld $2, %ymm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT: retq # sched: [1:0.50]
  %1 = call <8 x i32> @llvm.x86.avx2.psrl.d(<8 x i32> %a0, <4 x i32> %a1)
  %2 = load <4 x i32>, <4 x i32> *%a2, align 16
  %3 = call <8 x i32> @llvm.x86.avx2.psrl.d(<8 x i32> %1, <4 x i32> %2)
  %4 = lshr <8 x i32> %3, <i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2>
  ret <8 x i32> %4
}
declare <8 x i32> @llvm.x86.avx2.psrl.d(<8 x i32>, <4 x i32>) nounwind readnone
4337
; Scheduler-model test: VPSRLDQ ymm (per-lane byte shift), expressed as a shuffle with zero.
define <32 x i8> @test_psrldq(<32 x i8> %a0) {
; GENERIC-LABEL: test_psrldq:
; GENERIC: # BB#0:
; GENERIC-NEXT: vpsrldq {{.*#+}} ymm0 = ymm0[3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero,zero,ymm0[19,20,21,22,23,24,25,26,27,28,29,30,31],zero,zero,zero sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_psrldq:
; HASWELL: # BB#0:
; HASWELL-NEXT: vpsrldq {{.*#+}} ymm0 = ymm0[3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero,zero,ymm0[19,20,21,22,23,24,25,26,27,28,29,30,31],zero,zero,zero sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; SKYLAKE-LABEL: test_psrldq:
; SKYLAKE: # BB#0:
; SKYLAKE-NEXT: vpsrldq {{.*#+}} ymm0 = ymm0[3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero,zero,ymm0[19,20,21,22,23,24,25,26,27,28,29,30,31],zero,zero,zero sched: [1:1.00]
; SKYLAKE-NEXT: retq # sched: [2:1.00]
;
; ZNVER1-LABEL: test_psrldq:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vpsrldq {{.*#+}} ymm0 = ymm0[3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero,zero,ymm0[19,20,21,22,23,24,25,26,27,28,29,30,31],zero,zero,zero sched: [2:1.00]
; ZNVER1-NEXT: retq # sched: [1:0.50]
  %1 = shufflevector <32 x i8> %a0, <32 x i8> zeroinitializer, <32 x i32> <i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 32, i32 33, i32 34, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 48, i32 49, i32 50>
  ret <32 x i8> %1
}
4361
; Scheduler-model test: VPSRLQ ymm (logical shift) by xmm count, folded-load count, and immediate.
define <4 x i64> @test_psrlq(<4 x i64> %a0, <2 x i64> %a1, <2 x i64> *%a2) {
; GENERIC-LABEL: test_psrlq:
; GENERIC: # BB#0:
; GENERIC-NEXT: vpsrlq %xmm1, %ymm0, %ymm0 # sched: [1:1.00]
; GENERIC-NEXT: vpsrlq (%rdi), %ymm0, %ymm0 # sched: [5:1.00]
; GENERIC-NEXT: vpsrlq $2, %ymm0, %ymm0 # sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_psrlq:
; HASWELL: # BB#0:
; HASWELL-NEXT: vpsrlq %xmm1, %ymm0, %ymm0 # sched: [4:1.00]
; HASWELL-NEXT: vpsrlq (%rdi), %ymm0, %ymm0 # sched: [1:1.00]
; HASWELL-NEXT: vpsrlq $2, %ymm0, %ymm0 # sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; SKYLAKE-LABEL: test_psrlq:
; SKYLAKE: # BB#0:
; SKYLAKE-NEXT: vpsrlq %xmm1, %ymm0, %ymm0 # sched: [4:1.00]
; SKYLAKE-NEXT: vpsrlq (%rdi), %ymm0, %ymm0 # sched: [1:1.00]
; SKYLAKE-NEXT: vpsrlq $2, %ymm0, %ymm0 # sched: [1:1.00]
; SKYLAKE-NEXT: retq # sched: [2:1.00]
;
; ZNVER1-LABEL: test_psrlq:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vpsrlq %xmm1, %ymm0, %ymm0 # sched: [2:1.00]
; ZNVER1-NEXT: vpsrlq (%rdi), %ymm0, %ymm0 # sched: [9:1.00]
; ZNVER1-NEXT: vpsrlq $2, %ymm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT: retq # sched: [1:0.50]
  %1 = call <4 x i64> @llvm.x86.avx2.psrl.q(<4 x i64> %a0, <2 x i64> %a1)
  %2 = load <2 x i64>, <2 x i64> *%a2, align 16
  %3 = call <4 x i64> @llvm.x86.avx2.psrl.q(<4 x i64> %1, <2 x i64> %2)
  %4 = lshr <4 x i64> %3, <i64 2, i64 2, i64 2, i64 2>
  ret <4 x i64> %4
}
declare <4 x i64> @llvm.x86.avx2.psrl.q(<4 x i64>, <2 x i64>) nounwind readnone
4397
; Scheduler-model test: VPSRLVD xmm (per-element logical shift), register and folded-load forms.
define <4 x i32> @test_psrlvd(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> *%a2) {
; GENERIC-LABEL: test_psrlvd:
; GENERIC: # BB#0:
; GENERIC-NEXT: vpsrlvd %xmm1, %xmm0, %xmm0 # sched: [1:1.00]
; GENERIC-NEXT: vpsrlvd (%rdi), %xmm0, %xmm0 # sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_psrlvd:
; HASWELL: # BB#0:
; HASWELL-NEXT: vpsrlvd %xmm1, %xmm0, %xmm0 # sched: [3:2.00]
; HASWELL-NEXT: vpsrlvd (%rdi), %xmm0, %xmm0 # sched: [3:2.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; SKYLAKE-LABEL: test_psrlvd:
; SKYLAKE: # BB#0:
; SKYLAKE-NEXT: vpsrlvd %xmm1, %xmm0, %xmm0 # sched: [3:2.00]
; SKYLAKE-NEXT: vpsrlvd (%rdi), %xmm0, %xmm0 # sched: [3:2.00]
; SKYLAKE-NEXT: retq # sched: [2:1.00]
;
; ZNVER1-LABEL: test_psrlvd:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vpsrlvd %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; ZNVER1-NEXT: vpsrlvd (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
  %1 = call <4 x i32> @llvm.x86.avx2.psrlv.d(<4 x i32> %a0, <4 x i32> %a1)
  %2 = load <4 x i32>, <4 x i32> *%a2, align 16
  %3 = call <4 x i32> @llvm.x86.avx2.psrlv.d(<4 x i32> %1, <4 x i32> %2)
  ret <4 x i32> %3
}
declare <4 x i32> @llvm.x86.avx2.psrlv.d(<4 x i32>, <4 x i32>) nounwind readnone
4428
; Scheduler-model test: VPSRLVD ymm (per-element logical shift), register and folded-load forms.
define <8 x i32> @test_psrlvd_ymm(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> *%a2) {
; GENERIC-LABEL: test_psrlvd_ymm:
; GENERIC: # BB#0:
; GENERIC-NEXT: vpsrlvd %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
; GENERIC-NEXT: vpsrlvd (%rdi), %ymm0, %ymm0 # sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_psrlvd_ymm:
; HASWELL: # BB#0:
; HASWELL-NEXT: vpsrlvd %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
; HASWELL-NEXT: vpsrlvd (%rdi), %ymm0, %ymm0 # sched: [3:2.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; SKYLAKE-LABEL: test_psrlvd_ymm:
; SKYLAKE: # BB#0:
; SKYLAKE-NEXT: vpsrlvd %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
; SKYLAKE-NEXT: vpsrlvd (%rdi), %ymm0, %ymm0 # sched: [3:2.00]
; SKYLAKE-NEXT: retq # sched: [2:1.00]
;
; ZNVER1-LABEL: test_psrlvd_ymm:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vpsrlvd %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; ZNVER1-NEXT: vpsrlvd (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
  %1 = call <8 x i32> @llvm.x86.avx2.psrlv.d.256(<8 x i32> %a0, <8 x i32> %a1)
  %2 = load <8 x i32>, <8 x i32> *%a2, align 32
  %3 = call <8 x i32> @llvm.x86.avx2.psrlv.d.256(<8 x i32> %1, <8 x i32> %2)
  ret <8 x i32> %3
}
declare <8 x i32> @llvm.x86.avx2.psrlv.d.256(<8 x i32>, <8 x i32>) nounwind readnone
4459
; Scheduler-model test: VPSRLVQ xmm (per-element logical shift), register and folded-load forms.
define <2 x i64> @test_psrlvq(<2 x i64> %a0, <2 x i64> %a1, <2 x i64> *%a2) {
; GENERIC-LABEL: test_psrlvq:
; GENERIC: # BB#0:
; GENERIC-NEXT: vpsrlvq %xmm1, %xmm0, %xmm0 # sched: [1:1.00]
; GENERIC-NEXT: vpsrlvq (%rdi), %xmm0, %xmm0 # sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_psrlvq:
; HASWELL: # BB#0:
; HASWELL-NEXT: vpsrlvq %xmm1, %xmm0, %xmm0 # sched: [1:1.00]
; HASWELL-NEXT: vpsrlvq (%rdi), %xmm0, %xmm0 # sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; SKYLAKE-LABEL: test_psrlvq:
; SKYLAKE: # BB#0:
; SKYLAKE-NEXT: vpsrlvq %xmm1, %xmm0, %xmm0 # sched: [1:1.00]
; SKYLAKE-NEXT: vpsrlvq (%rdi), %xmm0, %xmm0 # sched: [1:1.00]
; SKYLAKE-NEXT: retq # sched: [2:1.00]
;
; ZNVER1-LABEL: test_psrlvq:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vpsrlvq %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
; ZNVER1-NEXT: vpsrlvq (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
  %1 = call <2 x i64> @llvm.x86.avx2.psrlv.q(<2 x i64> %a0, <2 x i64> %a1)
  %2 = load <2 x i64>, <2 x i64> *%a2, align 16
  %3 = call <2 x i64> @llvm.x86.avx2.psrlv.q(<2 x i64> %1, <2 x i64> %2)
  ret <2 x i64> %3
}
declare <2 x i64> @llvm.x86.avx2.psrlv.q(<2 x i64>, <2 x i64>) nounwind readnone
4490
; Scheduler-model test: VPSRLVQ ymm (per-element logical shift), register and folded-load forms.
define <4 x i64> @test_psrlvq_ymm(<4 x i64> %a0, <4 x i64> %a1, <4 x i64> *%a2) {
; GENERIC-LABEL: test_psrlvq_ymm:
; GENERIC: # BB#0:
; GENERIC-NEXT: vpsrlvq %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
; GENERIC-NEXT: vpsrlvq (%rdi), %ymm0, %ymm0 # sched: [5:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_psrlvq_ymm:
; HASWELL: # BB#0:
; HASWELL-NEXT: vpsrlvq %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
; HASWELL-NEXT: vpsrlvq (%rdi), %ymm0, %ymm0 # sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; SKYLAKE-LABEL: test_psrlvq_ymm:
; SKYLAKE: # BB#0:
; SKYLAKE-NEXT: vpsrlvq %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
; SKYLAKE-NEXT: vpsrlvq (%rdi), %ymm0, %ymm0 # sched: [1:1.00]
; SKYLAKE-NEXT: retq # sched: [2:1.00]
;
; ZNVER1-LABEL: test_psrlvq_ymm:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vpsrlvq %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; ZNVER1-NEXT: vpsrlvq (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
  %1 = call <4 x i64> @llvm.x86.avx2.psrlv.q.256(<4 x i64> %a0, <4 x i64> %a1)
  %2 = load <4 x i64>, <4 x i64> *%a2, align 32
  %3 = call <4 x i64> @llvm.x86.avx2.psrlv.q.256(<4 x i64> %1, <4 x i64> %2)
  ret <4 x i64> %3
}
declare <4 x i64> @llvm.x86.avx2.psrlv.q.256(<4 x i64>, <4 x i64>) nounwind readnone
4521
; Scheduler-model test: VPSRLW ymm (logical shift) by xmm count, folded-load count, and immediate.
define <16 x i16> @test_psrlw(<16 x i16> %a0, <8 x i16> %a1, <8 x i16> *%a2) {
; GENERIC-LABEL: test_psrlw:
; GENERIC: # BB#0:
; GENERIC-NEXT: vpsrlw %xmm1, %ymm0, %ymm0 # sched: [1:1.00]
; GENERIC-NEXT: vpsrlw (%rdi), %ymm0, %ymm0 # sched: [5:1.00]
; GENERIC-NEXT: vpsrlw $2, %ymm0, %ymm0 # sched: [1:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_psrlw:
; HASWELL: # BB#0:
; HASWELL-NEXT: vpsrlw %xmm1, %ymm0, %ymm0 # sched: [4:1.00]
; HASWELL-NEXT: vpsrlw (%rdi), %ymm0, %ymm0 # sched: [1:1.00]
; HASWELL-NEXT: vpsrlw $2, %ymm0, %ymm0 # sched: [1:1.00]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; SKYLAKE-LABEL: test_psrlw:
; SKYLAKE: # BB#0:
; SKYLAKE-NEXT: vpsrlw %xmm1, %ymm0, %ymm0 # sched: [4:1.00]
; SKYLAKE-NEXT: vpsrlw (%rdi), %ymm0, %ymm0 # sched: [1:1.00]
; SKYLAKE-NEXT: vpsrlw $2, %ymm0, %ymm0 # sched: [1:1.00]
; SKYLAKE-NEXT: retq # sched: [2:1.00]
;
; ZNVER1-LABEL: test_psrlw:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vpsrlw %xmm1, %ymm0, %ymm0 # sched: [2:1.00]
; ZNVER1-NEXT: vpsrlw (%rdi), %ymm0, %ymm0 # sched: [9:1.00]
; ZNVER1-NEXT: vpsrlw $2, %ymm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT: retq # sched: [1:0.50]
  %1 = call <16 x i16> @llvm.x86.avx2.psrl.w(<16 x i16> %a0, <8 x i16> %a1)
  %2 = load <8 x i16>, <8 x i16> *%a2, align 16
  %3 = call <16 x i16> @llvm.x86.avx2.psrl.w(<16 x i16> %1, <8 x i16> %2)
  %4 = lshr <16 x i16> %3, <i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2>
  ret <16 x i16> %4
}
declare <16 x i16> @llvm.x86.avx2.psrl.w(<16 x i16>, <8 x i16>) nounwind readnone
4557
; Scheduler-model test: VPSUBB ymm, register and folded-load forms.
define <32 x i8> @test_psubb(<32 x i8> %a0, <32 x i8> %a1, <32 x i8> *%a2) {
; GENERIC-LABEL: test_psubb:
; GENERIC: # BB#0:
; GENERIC-NEXT: vpsubb %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: vpsubb (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_psubb:
; HASWELL: # BB#0:
; HASWELL-NEXT: vpsubb %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: vpsubb (%rdi), %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; SKYLAKE-LABEL: test_psubb:
; SKYLAKE: # BB#0:
; SKYLAKE-NEXT: vpsubb %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; SKYLAKE-NEXT: vpsubb (%rdi), %ymm0, %ymm0 # sched: [1:0.50]
; SKYLAKE-NEXT: retq # sched: [2:1.00]
;
; ZNVER1-LABEL: test_psubb:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vpsubb %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpsubb (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
  %1 = sub <32 x i8> %a0, %a1
  %2 = load <32 x i8>, <32 x i8> *%a2, align 32
  %3 = sub <32 x i8> %1, %2
  ret <32 x i8> %3
}
4587
; Scheduler-model test: VPSUBD ymm, register and folded-load forms.
define <8 x i32> @test_psubd(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> *%a2) {
; GENERIC-LABEL: test_psubd:
; GENERIC: # BB#0:
; GENERIC-NEXT: vpsubd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: vpsubd (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_psubd:
; HASWELL: # BB#0:
; HASWELL-NEXT: vpsubd %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: vpsubd (%rdi), %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; SKYLAKE-LABEL: test_psubd:
; SKYLAKE: # BB#0:
; SKYLAKE-NEXT: vpsubd %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; SKYLAKE-NEXT: vpsubd (%rdi), %ymm0, %ymm0 # sched: [1:0.50]
; SKYLAKE-NEXT: retq # sched: [2:1.00]
;
; ZNVER1-LABEL: test_psubd:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vpsubd %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpsubd (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
  %1 = sub <8 x i32> %a0, %a1
  %2 = load <8 x i32>, <8 x i32> *%a2, align 32
  %3 = sub <8 x i32> %1, %2
  ret <8 x i32> %3
}
4617
; Scheduler-model test: VPSUBQ ymm, register and folded-load forms.
define <4 x i64> @test_psubq(<4 x i64> %a0, <4 x i64> %a1, <4 x i64> *%a2) {
; GENERIC-LABEL: test_psubq:
; GENERIC: # BB#0:
; GENERIC-NEXT: vpsubq %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: vpsubq (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_psubq:
; HASWELL: # BB#0:
; HASWELL-NEXT: vpsubq %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: vpsubq (%rdi), %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; SKYLAKE-LABEL: test_psubq:
; SKYLAKE: # BB#0:
; SKYLAKE-NEXT: vpsubq %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; SKYLAKE-NEXT: vpsubq (%rdi), %ymm0, %ymm0 # sched: [1:0.50]
; SKYLAKE-NEXT: retq # sched: [2:1.00]
;
; ZNVER1-LABEL: test_psubq:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vpsubq %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpsubq (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
  %1 = sub <4 x i64> %a0, %a1
  %2 = load <4 x i64>, <4 x i64> *%a2, align 32
  %3 = sub <4 x i64> %1, %2
  ret <4 x i64> %3
}
4647
; Scheduler-model test: VPSUBSB ymm (signed saturating subtract), register and folded-load forms.
define <32 x i8> @test_psubsb(<32 x i8> %a0, <32 x i8> %a1, <32 x i8> *%a2) {
; GENERIC-LABEL: test_psubsb:
; GENERIC: # BB#0:
; GENERIC-NEXT: vpsubsb %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: vpsubsb (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_psubsb:
; HASWELL: # BB#0:
; HASWELL-NEXT: vpsubsb %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: vpsubsb (%rdi), %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; SKYLAKE-LABEL: test_psubsb:
; SKYLAKE: # BB#0:
; SKYLAKE-NEXT: vpsubsb %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; SKYLAKE-NEXT: vpsubsb (%rdi), %ymm0, %ymm0 # sched: [1:0.50]
; SKYLAKE-NEXT: retq # sched: [2:1.00]
;
; ZNVER1-LABEL: test_psubsb:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vpsubsb %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpsubsb (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
  %1 = call <32 x i8> @llvm.x86.avx2.psubs.b(<32 x i8> %a0, <32 x i8> %a1)
  %2 = load <32 x i8>, <32 x i8> *%a2, align 32
  %3 = call <32 x i8> @llvm.x86.avx2.psubs.b(<32 x i8> %1, <32 x i8> %2)
  ret <32 x i8> %3
}
declare <32 x i8> @llvm.x86.avx2.psubs.b(<32 x i8>, <32 x i8>) nounwind readnone
4678
; Scheduler-model test: VPSUBSW ymm (signed saturating subtract), register and folded-load forms.
define <16 x i16> @test_psubsw(<16 x i16> %a0, <16 x i16> %a1, <16 x i16> *%a2) {
; GENERIC-LABEL: test_psubsw:
; GENERIC: # BB#0:
; GENERIC-NEXT: vpsubsw %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
; GENERIC-NEXT: vpsubsw (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_psubsw:
; HASWELL: # BB#0:
; HASWELL-NEXT: vpsubsw %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: vpsubsw (%rdi), %ymm0, %ymm0 # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; SKYLAKE-LABEL: test_psubsw:
; SKYLAKE: # BB#0:
; SKYLAKE-NEXT: vpsubsw %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
; SKYLAKE-NEXT: vpsubsw (%rdi), %ymm0, %ymm0 # sched: [1:0.50]
; SKYLAKE-NEXT: retq # sched: [2:1.00]
;
; ZNVER1-LABEL: test_psubsw:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vpsubsw %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
; ZNVER1-NEXT: vpsubsw (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
; ZNVER1-NEXT: retq # sched: [1:0.50]
  %1 = call <16 x i16> @llvm.x86.avx2.psubs.w(<16 x i16> %a0, <16 x i16> %a1)
  %2 = load <16 x i16>, <16 x i16> *%a2, align 32
  %3 = call <16 x i16> @llvm.x86.avx2.psubs.w(<16 x i16> %1, <16 x i16> %2)
  ret <16 x i16> %3
}
declare <16 x i16> @llvm.x86.avx2.psubs.w(<16 x i16>, <16 x i16>) nounwind readnone
4709
4710define <32 x i8> @test_psubusb(<32 x i8> %a0, <32 x i8> %a1, <32 x i8> *%a2) {
4711; GENERIC-LABEL: test_psubusb:
4712; GENERIC: # BB#0:
4713; GENERIC-NEXT: vpsubusb %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
4714; GENERIC-NEXT: vpsubusb (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
4715; GENERIC-NEXT: retq # sched: [1:1.00]
4716;
4717; HASWELL-LABEL: test_psubusb:
4718; HASWELL: # BB#0:
4719; HASWELL-NEXT: vpsubusb %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
4720; HASWELL-NEXT: vpsubusb (%rdi), %ymm0, %ymm0 # sched: [1:0.50]
4721; HASWELL-NEXT: retq # sched: [2:1.00]
4722;
4723; SKYLAKE-LABEL: test_psubusb:
4724; SKYLAKE: # BB#0:
4725; SKYLAKE-NEXT: vpsubusb %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
4726; SKYLAKE-NEXT: vpsubusb (%rdi), %ymm0, %ymm0 # sched: [1:0.50]
4727; SKYLAKE-NEXT: retq # sched: [2:1.00]
4728;
4729; ZNVER1-LABEL: test_psubusb:
4730; ZNVER1: # BB#0:
4731; ZNVER1-NEXT: vpsubusb %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
4732; ZNVER1-NEXT: vpsubusb (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
4733; ZNVER1-NEXT: retq # sched: [1:0.50]
4734 %1 = call <32 x i8> @llvm.x86.avx2.psubus.b(<32 x i8> %a0, <32 x i8> %a1)
4735 %2 = load <32 x i8>, <32 x i8> *%a2, align 32
4736 %3 = call <32 x i8> @llvm.x86.avx2.psubus.b(<32 x i8> %1, <32 x i8> %2)
4737 ret <32 x i8> %3
4738}
4739declare <32 x i8> @llvm.x86.avx2.psubus.b(<32 x i8>, <32 x i8>) nounwind readnone
4740
4741define <16 x i16> @test_psubusw(<16 x i16> %a0, <16 x i16> %a1, <16 x i16> *%a2) {
4742; GENERIC-LABEL: test_psubusw:
4743; GENERIC: # BB#0:
4744; GENERIC-NEXT: vpsubusw %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
4745; GENERIC-NEXT: vpsubusw (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
4746; GENERIC-NEXT: retq # sched: [1:1.00]
4747;
4748; HASWELL-LABEL: test_psubusw:
4749; HASWELL: # BB#0:
4750; HASWELL-NEXT: vpsubusw %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
4751; HASWELL-NEXT: vpsubusw (%rdi), %ymm0, %ymm0 # sched: [1:0.50]
4752; HASWELL-NEXT: retq # sched: [2:1.00]
4753;
4754; SKYLAKE-LABEL: test_psubusw:
4755; SKYLAKE: # BB#0:
4756; SKYLAKE-NEXT: vpsubusw %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
4757; SKYLAKE-NEXT: vpsubusw (%rdi), %ymm0, %ymm0 # sched: [1:0.50]
4758; SKYLAKE-NEXT: retq # sched: [2:1.00]
4759;
4760; ZNVER1-LABEL: test_psubusw:
4761; ZNVER1: # BB#0:
4762; ZNVER1-NEXT: vpsubusw %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
4763; ZNVER1-NEXT: vpsubusw (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
4764; ZNVER1-NEXT: retq # sched: [1:0.50]
4765 %1 = call <16 x i16> @llvm.x86.avx2.psubus.w(<16 x i16> %a0, <16 x i16> %a1)
4766 %2 = load <16 x i16>, <16 x i16> *%a2, align 32
4767 %3 = call <16 x i16> @llvm.x86.avx2.psubus.w(<16 x i16> %1, <16 x i16> %2)
4768 ret <16 x i16> %3
4769}
4770declare <16 x i16> @llvm.x86.avx2.psubus.w(<16 x i16>, <16 x i16>) nounwind readnone
4771
Simon Pilgrim946f08c2017-05-06 13:46:09 +00004772define <16 x i16> @test_psubw(<16 x i16> %a0, <16 x i16> %a1, <16 x i16> *%a2) {
Simon Pilgrim84846982017-08-01 15:14:35 +00004773; GENERIC-LABEL: test_psubw:
4774; GENERIC: # BB#0:
4775; GENERIC-NEXT: vpsubw %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
4776; GENERIC-NEXT: vpsubw (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
4777; GENERIC-NEXT: retq # sched: [1:1.00]
4778;
Simon Pilgrim946f08c2017-05-06 13:46:09 +00004779; HASWELL-LABEL: test_psubw:
4780; HASWELL: # BB#0:
4781; HASWELL-NEXT: vpsubw %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
Gadi Haberd76f7b82017-08-28 10:04:16 +00004782; HASWELL-NEXT: vpsubw (%rdi), %ymm0, %ymm0 # sched: [1:0.50]
4783; HASWELL-NEXT: retq # sched: [2:1.00]
Simon Pilgrim946f08c2017-05-06 13:46:09 +00004784;
Gadi Haber767d98b2017-08-30 08:08:50 +00004785; SKYLAKE-LABEL: test_psubw:
4786; SKYLAKE: # BB#0:
4787; SKYLAKE-NEXT: vpsubw %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
4788; SKYLAKE-NEXT: vpsubw (%rdi), %ymm0, %ymm0 # sched: [1:0.50]
4789; SKYLAKE-NEXT: retq # sched: [2:1.00]
4790;
Simon Pilgrim946f08c2017-05-06 13:46:09 +00004791; ZNVER1-LABEL: test_psubw:
4792; ZNVER1: # BB#0:
Craig Topper106b5b62017-07-19 02:45:14 +00004793; ZNVER1-NEXT: vpsubw %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
4794; ZNVER1-NEXT: vpsubw (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
Ashutosh Nemabfcac0b2017-08-31 12:38:35 +00004795; ZNVER1-NEXT: retq # sched: [1:0.50]
Simon Pilgrim946f08c2017-05-06 13:46:09 +00004796 %1 = sub <16 x i16> %a0, %a1
4797 %2 = load <16 x i16>, <16 x i16> *%a2, align 32
4798 %3 = sub <16 x i16> %1, %2
4799 ret <16 x i16> %3
4800}
4801
Simon Pilgrim0af5a7722017-09-12 15:01:20 +00004802define <32 x i8> @test_punpckhbw(<32 x i8> %a0, <32 x i8> %a1, <32 x i8> *%a2) {
4803; GENERIC-LABEL: test_punpckhbw:
4804; GENERIC: # BB#0:
4805; GENERIC-NEXT: vpunpckhbw {{.*#+}} ymm0 = ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15],ymm0[24],ymm1[24],ymm0[25],ymm1[25],ymm0[26],ymm1[26],ymm0[27],ymm1[27],ymm0[28],ymm1[28],ymm0[29],ymm1[29],ymm0[30],ymm1[30],ymm0[31],ymm1[31] sched: [1:1.00]
4806; GENERIC-NEXT: vpunpckhbw {{.*#+}} ymm0 = ymm0[8],mem[8],ymm0[9],mem[9],ymm0[10],mem[10],ymm0[11],mem[11],ymm0[12],mem[12],ymm0[13],mem[13],ymm0[14],mem[14],ymm0[15],mem[15],ymm0[24],mem[24],ymm0[25],mem[25],ymm0[26],mem[26],ymm0[27],mem[27],ymm0[28],mem[28],ymm0[29],mem[29],ymm0[30],mem[30],ymm0[31],mem[31] sched: [5:1.00]
4807; GENERIC-NEXT: retq # sched: [1:1.00]
4808;
4809; HASWELL-LABEL: test_punpckhbw:
4810; HASWELL: # BB#0:
4811; HASWELL-NEXT: vpunpckhbw {{.*#+}} ymm0 = ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15],ymm0[24],ymm1[24],ymm0[25],ymm1[25],ymm0[26],ymm1[26],ymm0[27],ymm1[27],ymm0[28],ymm1[28],ymm0[29],ymm1[29],ymm0[30],ymm1[30],ymm0[31],ymm1[31] sched: [1:1.00]
4812; HASWELL-NEXT: vpunpckhbw {{.*#+}} ymm0 = ymm0[8],mem[8],ymm0[9],mem[9],ymm0[10],mem[10],ymm0[11],mem[11],ymm0[12],mem[12],ymm0[13],mem[13],ymm0[14],mem[14],ymm0[15],mem[15],ymm0[24],mem[24],ymm0[25],mem[25],ymm0[26],mem[26],ymm0[27],mem[27],ymm0[28],mem[28],ymm0[29],mem[29],ymm0[30],mem[30],ymm0[31],mem[31] sched: [1:1.00]
4813; HASWELL-NEXT: retq # sched: [2:1.00]
4814;
4815; SKYLAKE-LABEL: test_punpckhbw:
4816; SKYLAKE: # BB#0:
4817; SKYLAKE-NEXT: vpunpckhbw {{.*#+}} ymm0 = ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15],ymm0[24],ymm1[24],ymm0[25],ymm1[25],ymm0[26],ymm1[26],ymm0[27],ymm1[27],ymm0[28],ymm1[28],ymm0[29],ymm1[29],ymm0[30],ymm1[30],ymm0[31],ymm1[31] sched: [1:1.00]
4818; SKYLAKE-NEXT: vpunpckhbw {{.*#+}} ymm0 = ymm0[8],mem[8],ymm0[9],mem[9],ymm0[10],mem[10],ymm0[11],mem[11],ymm0[12],mem[12],ymm0[13],mem[13],ymm0[14],mem[14],ymm0[15],mem[15],ymm0[24],mem[24],ymm0[25],mem[25],ymm0[26],mem[26],ymm0[27],mem[27],ymm0[28],mem[28],ymm0[29],mem[29],ymm0[30],mem[30],ymm0[31],mem[31] sched: [1:1.00]
4819; SKYLAKE-NEXT: retq # sched: [2:1.00]
4820;
4821; ZNVER1-LABEL: test_punpckhbw:
4822; ZNVER1: # BB#0:
4823; ZNVER1-NEXT: vpunpckhbw {{.*#+}} ymm0 = ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15],ymm0[24],ymm1[24],ymm0[25],ymm1[25],ymm0[26],ymm1[26],ymm0[27],ymm1[27],ymm0[28],ymm1[28],ymm0[29],ymm1[29],ymm0[30],ymm1[30],ymm0[31],ymm1[31] sched: [1:0.25]
4824; ZNVER1-NEXT: vpunpckhbw {{.*#+}} ymm0 = ymm0[8],mem[8],ymm0[9],mem[9],ymm0[10],mem[10],ymm0[11],mem[11],ymm0[12],mem[12],ymm0[13],mem[13],ymm0[14],mem[14],ymm0[15],mem[15],ymm0[24],mem[24],ymm0[25],mem[25],ymm0[26],mem[26],ymm0[27],mem[27],ymm0[28],mem[28],ymm0[29],mem[29],ymm0[30],mem[30],ymm0[31],mem[31] sched: [8:0.50]
4825; ZNVER1-NEXT: retq # sched: [1:0.50]
4826 %1 = shufflevector <32 x i8> %a0, <32 x i8> %a1, <32 x i32> <i32 8, i32 40, i32 9, i32 41, i32 10, i32 42, i32 11, i32 43, i32 12, i32 44, i32 13, i32 45, i32 14, i32 46, i32 15, i32 47, i32 24, i32 56, i32 25, i32 57, i32 26, i32 58, i32 27, i32 59, i32 28, i32 60, i32 29, i32 61, i32 30, i32 62, i32 31, i32 63>
4827 %2 = load <32 x i8>, <32 x i8> *%a2, align 32
4828 %3 = shufflevector <32 x i8> %1, <32 x i8> %2, <32 x i32> <i32 8, i32 40, i32 9, i32 41, i32 10, i32 42, i32 11, i32 43, i32 12, i32 44, i32 13, i32 45, i32 14, i32 46, i32 15, i32 47, i32 24, i32 56, i32 25, i32 57, i32 26, i32 58, i32 27, i32 59, i32 28, i32 60, i32 29, i32 61, i32 30, i32 62, i32 31, i32 63>
4829 ret <32 x i8> %3
4830}
Simon Pilgrim76418aa2017-09-12 15:52:01 +00004831
Simon Pilgrim0af5a7722017-09-12 15:01:20 +00004832define <8 x i32> @test_punpckhdq(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> *%a2) {
4833; GENERIC-LABEL: test_punpckhdq:
4834; GENERIC: # BB#0:
4835; GENERIC-NEXT: vpunpckhdq {{.*#+}} ymm0 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7] sched: [1:1.00]
4836; GENERIC-NEXT: vpunpckhdq {{.*#+}} ymm0 = ymm0[2],mem[2],ymm0[3],mem[3],ymm0[6],mem[6],ymm0[7],mem[7] sched: [5:1.00]
4837; GENERIC-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1 # sched: [3:1.00]
4838; GENERIC-NEXT: vpsubd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
4839; GENERIC-NEXT: retq # sched: [1:1.00]
4840;
4841; HASWELL-LABEL: test_punpckhdq:
4842; HASWELL: # BB#0:
4843; HASWELL-NEXT: vpunpckhdq {{.*#+}} ymm0 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7] sched: [1:1.00]
4844; HASWELL-NEXT: vpunpckhdq {{.*#+}} ymm0 = ymm0[2],mem[2],ymm0[3],mem[3],ymm0[6],mem[6],ymm0[7],mem[7] sched: [1:1.00]
4845; HASWELL-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1 # sched: [1:0.50]
4846; HASWELL-NEXT: vpsubd %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
4847; HASWELL-NEXT: retq # sched: [2:1.00]
4848;
4849; SKYLAKE-LABEL: test_punpckhdq:
4850; SKYLAKE: # BB#0:
4851; SKYLAKE-NEXT: vpunpckhdq {{.*#+}} ymm0 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7] sched: [1:1.00]
4852; SKYLAKE-NEXT: vpunpckhdq {{.*#+}} ymm0 = ymm0[2],mem[2],ymm0[3],mem[3],ymm0[6],mem[6],ymm0[7],mem[7] sched: [1:1.00]
4853; SKYLAKE-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1 # sched: [1:0.50]
4854; SKYLAKE-NEXT: vpsubd %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
4855; SKYLAKE-NEXT: retq # sched: [2:1.00]
4856;
4857; ZNVER1-LABEL: test_punpckhdq:
4858; ZNVER1: # BB#0:
4859; ZNVER1-NEXT: vpunpckhdq {{.*#+}} ymm0 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7] sched: [1:0.25]
4860; ZNVER1-NEXT: vpunpckhdq {{.*#+}} ymm0 = ymm0[2],mem[2],ymm0[3],mem[3],ymm0[6],mem[6],ymm0[7],mem[7] sched: [8:0.50]
4861; ZNVER1-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1 # sched: [1:0.25]
4862; ZNVER1-NEXT: vpsubd %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
4863; ZNVER1-NEXT: retq # sched: [1:0.50]
4864 %1 = shufflevector <8 x i32> %a0, <8 x i32> %a1, <8 x i32> <i32 2, i32 10, i32 3, i32 11, i32 6, i32 14, i32 7, i32 15>
4865 %2 = load <8 x i32>, <8 x i32> *%a2, align 32
4866 %3 = shufflevector <8 x i32> %1, <8 x i32> %2, <8 x i32> <i32 2, i32 10, i32 3, i32 11, i32 6, i32 14, i32 7, i32 15>
4867 %4 = add <8 x i32> %3, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
4868 ret <8 x i32> %4
4869}
4870
4871define <4 x i64> @test_punpckhqdq(<4 x i64> %a0, <4 x i64> %a1, <4 x i64> *%a2) {
4872; GENERIC-LABEL: test_punpckhqdq:
4873; GENERIC: # BB#0:
4874; GENERIC-NEXT: vpunpckhqdq {{.*#+}} ymm1 = ymm0[1],ymm1[1],ymm0[3],ymm1[3] sched: [1:1.00]
4875; GENERIC-NEXT: vpunpckhqdq {{.*#+}} ymm0 = ymm0[1],mem[1],ymm0[3],mem[3] sched: [5:1.00]
4876; GENERIC-NEXT: vpaddq %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
4877; GENERIC-NEXT: retq # sched: [1:1.00]
4878;
4879; HASWELL-LABEL: test_punpckhqdq:
4880; HASWELL: # BB#0:
4881; HASWELL-NEXT: vpunpckhqdq {{.*#+}} ymm1 = ymm0[1],ymm1[1],ymm0[3],ymm1[3] sched: [1:1.00]
4882; HASWELL-NEXT: vpunpckhqdq {{.*#+}} ymm0 = ymm0[1],mem[1],ymm0[3],mem[3] sched: [1:1.00]
4883; HASWELL-NEXT: vpaddq %ymm0, %ymm1, %ymm0 # sched: [1:0.50]
4884; HASWELL-NEXT: retq # sched: [2:1.00]
4885;
4886; SKYLAKE-LABEL: test_punpckhqdq:
4887; SKYLAKE: # BB#0:
4888; SKYLAKE-NEXT: vpunpckhqdq {{.*#+}} ymm1 = ymm0[1],ymm1[1],ymm0[3],ymm1[3] sched: [1:1.00]
4889; SKYLAKE-NEXT: vpunpckhqdq {{.*#+}} ymm0 = ymm0[1],mem[1],ymm0[3],mem[3] sched: [1:1.00]
4890; SKYLAKE-NEXT: vpaddq %ymm0, %ymm1, %ymm0 # sched: [1:0.50]
4891; SKYLAKE-NEXT: retq # sched: [2:1.00]
4892;
4893; ZNVER1-LABEL: test_punpckhqdq:
4894; ZNVER1: # BB#0:
4895; ZNVER1-NEXT: vpunpckhqdq {{.*#+}} ymm1 = ymm0[1],ymm1[1],ymm0[3],ymm1[3] sched: [1:0.25]
4896; ZNVER1-NEXT: vpunpckhqdq {{.*#+}} ymm0 = ymm0[1],mem[1],ymm0[3],mem[3] sched: [8:0.50]
4897; ZNVER1-NEXT: vpaddq %ymm0, %ymm1, %ymm0 # sched: [1:0.25]
4898; ZNVER1-NEXT: retq # sched: [1:0.50]
4899 %1 = shufflevector <4 x i64> %a0, <4 x i64> %a1, <4 x i32> <i32 1, i32 5, i32 3, i32 7>
4900 %2 = load <4 x i64>, <4 x i64> *%a2, align 32
4901 %3 = shufflevector <4 x i64> %a0, <4 x i64> %2, <4 x i32> <i32 1, i32 5, i32 3, i32 7>
4902 %4 = add <4 x i64> %1, %3
4903 ret <4 x i64> %4
4904}
4905
4906define <16 x i16> @test_punpckhwd(<16 x i16> %a0, <16 x i16> %a1, <16 x i16> *%a2) {
4907; GENERIC-LABEL: test_punpckhwd:
4908; GENERIC: # BB#0:
4909; GENERIC-NEXT: vpunpckhwd {{.*#+}} ymm0 = ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15] sched: [1:1.00]
4910; GENERIC-NEXT: vpunpckhwd {{.*#+}} ymm0 = ymm0[4],mem[4],ymm0[5],mem[5],ymm0[6],mem[6],ymm0[7],mem[7],ymm0[12],mem[12],ymm0[13],mem[13],ymm0[14],mem[14],ymm0[15],mem[15] sched: [5:1.00]
4911; GENERIC-NEXT: retq # sched: [1:1.00]
4912;
4913; HASWELL-LABEL: test_punpckhwd:
4914; HASWELL: # BB#0:
4915; HASWELL-NEXT: vpunpckhwd {{.*#+}} ymm0 = ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15] sched: [1:1.00]
4916; HASWELL-NEXT: vpunpckhwd {{.*#+}} ymm0 = ymm0[4],mem[4],ymm0[5],mem[5],ymm0[6],mem[6],ymm0[7],mem[7],ymm0[12],mem[12],ymm0[13],mem[13],ymm0[14],mem[14],ymm0[15],mem[15] sched: [1:1.00]
4917; HASWELL-NEXT: retq # sched: [2:1.00]
4918;
4919; SKYLAKE-LABEL: test_punpckhwd:
4920; SKYLAKE: # BB#0:
4921; SKYLAKE-NEXT: vpunpckhwd {{.*#+}} ymm0 = ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15] sched: [1:1.00]
4922; SKYLAKE-NEXT: vpunpckhwd {{.*#+}} ymm0 = ymm0[4],mem[4],ymm0[5],mem[5],ymm0[6],mem[6],ymm0[7],mem[7],ymm0[12],mem[12],ymm0[13],mem[13],ymm0[14],mem[14],ymm0[15],mem[15] sched: [1:1.00]
4923; SKYLAKE-NEXT: retq # sched: [2:1.00]
4924;
4925; ZNVER1-LABEL: test_punpckhwd:
4926; ZNVER1: # BB#0:
4927; ZNVER1-NEXT: vpunpckhwd {{.*#+}} ymm0 = ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15] sched: [1:0.25]
4928; ZNVER1-NEXT: vpunpckhwd {{.*#+}} ymm0 = ymm0[4],mem[4],ymm0[5],mem[5],ymm0[6],mem[6],ymm0[7],mem[7],ymm0[12],mem[12],ymm0[13],mem[13],ymm0[14],mem[14],ymm0[15],mem[15] sched: [8:0.50]
4929; ZNVER1-NEXT: retq # sched: [1:0.50]
4930 %1 = shufflevector <16 x i16> %a0, <16 x i16> %a1, <16 x i32> <i32 4, i32 20, i32 5, i32 21, i32 6, i32 22, i32 7, i32 23, i32 12, i32 28, i32 13, i32 29, i32 14, i32 30, i32 15, i32 31>
4931 %2 = load <16 x i16>, <16 x i16> *%a2, align 32
4932 %3 = shufflevector <16 x i16> %1, <16 x i16> %2, <16 x i32> <i32 4, i32 20, i32 5, i32 21, i32 6, i32 22, i32 7, i32 23, i32 12, i32 28, i32 13, i32 29, i32 14, i32 30, i32 15, i32 31>
4933 ret <16 x i16> %3
4934}
4935
4936define <32 x i8> @test_punpcklbw(<32 x i8> %a0, <32 x i8> %a1, <32 x i8> *%a2) {
4937; GENERIC-LABEL: test_punpcklbw:
4938; GENERIC: # BB#0:
4939; GENERIC-NEXT: vpunpcklbw {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[16],ymm1[16],ymm0[17],ymm1[17],ymm0[18],ymm1[18],ymm0[19],ymm1[19],ymm0[20],ymm1[20],ymm0[21],ymm1[21],ymm0[22],ymm1[22],ymm0[23],ymm1[23] sched: [1:1.00]
4940; GENERIC-NEXT: vpunpcklbw {{.*#+}} ymm0 = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[2],mem[2],ymm0[3],mem[3],ymm0[4],mem[4],ymm0[5],mem[5],ymm0[6],mem[6],ymm0[7],mem[7],ymm0[16],mem[16],ymm0[17],mem[17],ymm0[18],mem[18],ymm0[19],mem[19],ymm0[20],mem[20],ymm0[21],mem[21],ymm0[22],mem[22],ymm0[23],mem[23] sched: [5:1.00]
4941; GENERIC-NEXT: retq # sched: [1:1.00]
4942;
4943; HASWELL-LABEL: test_punpcklbw:
4944; HASWELL: # BB#0:
4945; HASWELL-NEXT: vpunpcklbw {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[16],ymm1[16],ymm0[17],ymm1[17],ymm0[18],ymm1[18],ymm0[19],ymm1[19],ymm0[20],ymm1[20],ymm0[21],ymm1[21],ymm0[22],ymm1[22],ymm0[23],ymm1[23] sched: [1:1.00]
4946; HASWELL-NEXT: vpunpcklbw {{.*#+}} ymm0 = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[2],mem[2],ymm0[3],mem[3],ymm0[4],mem[4],ymm0[5],mem[5],ymm0[6],mem[6],ymm0[7],mem[7],ymm0[16],mem[16],ymm0[17],mem[17],ymm0[18],mem[18],ymm0[19],mem[19],ymm0[20],mem[20],ymm0[21],mem[21],ymm0[22],mem[22],ymm0[23],mem[23] sched: [1:1.00]
4947; HASWELL-NEXT: retq # sched: [2:1.00]
4948;
4949; SKYLAKE-LABEL: test_punpcklbw:
4950; SKYLAKE: # BB#0:
4951; SKYLAKE-NEXT: vpunpcklbw {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[16],ymm1[16],ymm0[17],ymm1[17],ymm0[18],ymm1[18],ymm0[19],ymm1[19],ymm0[20],ymm1[20],ymm0[21],ymm1[21],ymm0[22],ymm1[22],ymm0[23],ymm1[23] sched: [1:1.00]
4952; SKYLAKE-NEXT: vpunpcklbw {{.*#+}} ymm0 = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[2],mem[2],ymm0[3],mem[3],ymm0[4],mem[4],ymm0[5],mem[5],ymm0[6],mem[6],ymm0[7],mem[7],ymm0[16],mem[16],ymm0[17],mem[17],ymm0[18],mem[18],ymm0[19],mem[19],ymm0[20],mem[20],ymm0[21],mem[21],ymm0[22],mem[22],ymm0[23],mem[23] sched: [1:1.00]
4953; SKYLAKE-NEXT: retq # sched: [2:1.00]
4954;
4955; ZNVER1-LABEL: test_punpcklbw:
4956; ZNVER1: # BB#0:
4957; ZNVER1-NEXT: vpunpcklbw {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[16],ymm1[16],ymm0[17],ymm1[17],ymm0[18],ymm1[18],ymm0[19],ymm1[19],ymm0[20],ymm1[20],ymm0[21],ymm1[21],ymm0[22],ymm1[22],ymm0[23],ymm1[23] sched: [1:0.25]
4958; ZNVER1-NEXT: vpunpcklbw {{.*#+}} ymm0 = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[2],mem[2],ymm0[3],mem[3],ymm0[4],mem[4],ymm0[5],mem[5],ymm0[6],mem[6],ymm0[7],mem[7],ymm0[16],mem[16],ymm0[17],mem[17],ymm0[18],mem[18],ymm0[19],mem[19],ymm0[20],mem[20],ymm0[21],mem[21],ymm0[22],mem[22],ymm0[23],mem[23] sched: [8:0.50]
4959; ZNVER1-NEXT: retq # sched: [1:0.50]
4960 %1 = shufflevector <32 x i8> %a0, <32 x i8> %a1, <32 x i32> <i32 0, i32 32, i32 1, i32 33, i32 2, i32 34, i32 3, i32 35, i32 4, i32 36, i32 5, i32 37, i32 6, i32 38, i32 7, i32 39, i32 16, i32 48, i32 17, i32 49, i32 18, i32 50, i32 19, i32 51, i32 20, i32 52, i32 21, i32 53, i32 22, i32 54, i32 23, i32 55>
4961 %2 = load <32 x i8>, <32 x i8> *%a2, align 32
4962 %3 = shufflevector <32 x i8> %1, <32 x i8> %2, <32 x i32> <i32 0, i32 32, i32 1, i32 33, i32 2, i32 34, i32 3, i32 35, i32 4, i32 36, i32 5, i32 37, i32 6, i32 38, i32 7, i32 39, i32 16, i32 48, i32 17, i32 49, i32 18, i32 50, i32 19, i32 51, i32 20, i32 52, i32 21, i32 53, i32 22, i32 54, i32 23, i32 55>
4963 ret <32 x i8> %3
4964}
4965
4966define <8 x i32> @test_punpckldq(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> *%a2) {
4967; GENERIC-LABEL: test_punpckldq:
4968; GENERIC: # BB#0:
4969; GENERIC-NEXT: vpunpckldq {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5] sched: [1:1.00]
4970; GENERIC-NEXT: vpunpckldq {{.*#+}} ymm0 = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[4],mem[4],ymm0[5],mem[5] sched: [5:1.00]
4971; GENERIC-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1 # sched: [3:1.00]
4972; GENERIC-NEXT: vpsubd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
4973; GENERIC-NEXT: retq # sched: [1:1.00]
4974;
4975; HASWELL-LABEL: test_punpckldq:
4976; HASWELL: # BB#0:
4977; HASWELL-NEXT: vpunpckldq {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5] sched: [1:1.00]
4978; HASWELL-NEXT: vpunpckldq {{.*#+}} ymm0 = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[4],mem[4],ymm0[5],mem[5] sched: [1:1.00]
4979; HASWELL-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1 # sched: [1:0.50]
4980; HASWELL-NEXT: vpsubd %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
4981; HASWELL-NEXT: retq # sched: [2:1.00]
4982;
4983; SKYLAKE-LABEL: test_punpckldq:
4984; SKYLAKE: # BB#0:
4985; SKYLAKE-NEXT: vpunpckldq {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5] sched: [1:1.00]
4986; SKYLAKE-NEXT: vpunpckldq {{.*#+}} ymm0 = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[4],mem[4],ymm0[5],mem[5] sched: [1:1.00]
4987; SKYLAKE-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1 # sched: [1:0.50]
4988; SKYLAKE-NEXT: vpsubd %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
4989; SKYLAKE-NEXT: retq # sched: [2:1.00]
4990;
4991; ZNVER1-LABEL: test_punpckldq:
4992; ZNVER1: # BB#0:
4993; ZNVER1-NEXT: vpunpckldq {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5] sched: [1:0.25]
4994; ZNVER1-NEXT: vpunpckldq {{.*#+}} ymm0 = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[4],mem[4],ymm0[5],mem[5] sched: [8:0.50]
4995; ZNVER1-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1 # sched: [1:0.25]
4996; ZNVER1-NEXT: vpsubd %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
4997; ZNVER1-NEXT: retq # sched: [1:0.50]
4998 %1 = shufflevector <8 x i32> %a0, <8 x i32> %a1, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 4, i32 12, i32 5, i32 13>
4999 %2 = load <8 x i32>, <8 x i32> *%a2, align 32
5000 %3 = shufflevector <8 x i32> %1, <8 x i32> %2, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 4, i32 12, i32 5, i32 13>
5001 %4 = add <8 x i32> %3, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
5002 ret <8 x i32> %4
5003}
5004
5005define <4 x i64> @test_punpcklqdq(<4 x i64> %a0, <4 x i64> %a1, <4 x i64> *%a2) {
5006; GENERIC-LABEL: test_punpcklqdq:
5007; GENERIC: # BB#0:
5008; GENERIC-NEXT: vpunpcklqdq {{.*#+}} ymm1 = ymm0[0],ymm1[0],ymm0[2],ymm1[2] sched: [1:1.00]
5009; GENERIC-NEXT: vpunpcklqdq {{.*#+}} ymm0 = ymm0[0],mem[0],ymm0[2],mem[2] sched: [5:1.00]
5010; GENERIC-NEXT: vpaddq %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
5011; GENERIC-NEXT: retq # sched: [1:1.00]
5012;
5013; HASWELL-LABEL: test_punpcklqdq:
5014; HASWELL: # BB#0:
5015; HASWELL-NEXT: vpunpcklqdq {{.*#+}} ymm1 = ymm0[0],ymm1[0],ymm0[2],ymm1[2] sched: [1:1.00]
5016; HASWELL-NEXT: vpunpcklqdq {{.*#+}} ymm0 = ymm0[0],mem[0],ymm0[2],mem[2] sched: [1:1.00]
5017; HASWELL-NEXT: vpaddq %ymm0, %ymm1, %ymm0 # sched: [1:0.50]
5018; HASWELL-NEXT: retq # sched: [2:1.00]
5019;
5020; SKYLAKE-LABEL: test_punpcklqdq:
5021; SKYLAKE: # BB#0:
5022; SKYLAKE-NEXT: vpunpcklqdq {{.*#+}} ymm1 = ymm0[0],ymm1[0],ymm0[2],ymm1[2] sched: [1:1.00]
5023; SKYLAKE-NEXT: vpunpcklqdq {{.*#+}} ymm0 = ymm0[0],mem[0],ymm0[2],mem[2] sched: [1:1.00]
5024; SKYLAKE-NEXT: vpaddq %ymm0, %ymm1, %ymm0 # sched: [1:0.50]
5025; SKYLAKE-NEXT: retq # sched: [2:1.00]
5026;
5027; ZNVER1-LABEL: test_punpcklqdq:
5028; ZNVER1: # BB#0:
5029; ZNVER1-NEXT: vpunpcklqdq {{.*#+}} ymm1 = ymm0[0],ymm1[0],ymm0[2],ymm1[2] sched: [1:0.25]
5030; ZNVER1-NEXT: vpunpcklqdq {{.*#+}} ymm0 = ymm0[0],mem[0],ymm0[2],mem[2] sched: [8:0.50]
5031; ZNVER1-NEXT: vpaddq %ymm0, %ymm1, %ymm0 # sched: [1:0.25]
5032; ZNVER1-NEXT: retq # sched: [1:0.50]
5033 %1 = shufflevector <4 x i64> %a0, <4 x i64> %a1, <4 x i32> <i32 0, i32 4, i32 2, i32 6>
5034 %2 = load <4 x i64>, <4 x i64> *%a2, align 32
5035 %3 = shufflevector <4 x i64> %a0, <4 x i64> %2, <4 x i32> <i32 0, i32 4, i32 2, i32 6>
5036 %4 = add <4 x i64> %1, %3
5037 ret <4 x i64> %4
5038}
5039
5040define <16 x i16> @test_punpcklwd(<16 x i16> %a0, <16 x i16> %a1, <16 x i16> *%a2) {
5041; GENERIC-LABEL: test_punpcklwd:
5042; GENERIC: # BB#0:
5043; GENERIC-NEXT: vpunpcklwd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11] sched: [1:1.00]
5044; GENERIC-NEXT: vpunpcklwd {{.*#+}} ymm0 = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[2],mem[2],ymm0[3],mem[3],ymm0[8],mem[8],ymm0[9],mem[9],ymm0[10],mem[10],ymm0[11],mem[11] sched: [5:1.00]
5045; GENERIC-NEXT: retq # sched: [1:1.00]
5046;
5047; HASWELL-LABEL: test_punpcklwd:
5048; HASWELL: # BB#0:
5049; HASWELL-NEXT: vpunpcklwd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11] sched: [1:1.00]
5050; HASWELL-NEXT: vpunpcklwd {{.*#+}} ymm0 = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[2],mem[2],ymm0[3],mem[3],ymm0[8],mem[8],ymm0[9],mem[9],ymm0[10],mem[10],ymm0[11],mem[11] sched: [1:1.00]
5051; HASWELL-NEXT: retq # sched: [2:1.00]
5052;
5053; SKYLAKE-LABEL: test_punpcklwd:
5054; SKYLAKE: # BB#0:
5055; SKYLAKE-NEXT: vpunpcklwd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11] sched: [1:1.00]
5056; SKYLAKE-NEXT: vpunpcklwd {{.*#+}} ymm0 = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[2],mem[2],ymm0[3],mem[3],ymm0[8],mem[8],ymm0[9],mem[9],ymm0[10],mem[10],ymm0[11],mem[11] sched: [1:1.00]
5057; SKYLAKE-NEXT: retq # sched: [2:1.00]
5058;
5059; ZNVER1-LABEL: test_punpcklwd:
5060; ZNVER1: # BB#0:
5061; ZNVER1-NEXT: vpunpcklwd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11] sched: [1:0.25]
5062; ZNVER1-NEXT: vpunpcklwd {{.*#+}} ymm0 = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[2],mem[2],ymm0[3],mem[3],ymm0[8],mem[8],ymm0[9],mem[9],ymm0[10],mem[10],ymm0[11],mem[11] sched: [8:0.50]
5063; ZNVER1-NEXT: retq # sched: [1:0.50]
5064 %1 = shufflevector <16 x i16> %a0, <16 x i16> %a1, <16 x i32> <i32 0, i32 16, i32 1, i32 17, i32 2, i32 18, i32 3, i32 19, i32 8, i32 24, i32 9, i32 25, i32 10, i32 26, i32 11, i32 27>
5065 %2 = load <16 x i16>, <16 x i16> *%a2, align 32
5066 %3 = shufflevector <16 x i16> %1, <16 x i16> %2, <16 x i32> <i32 0, i32 16, i32 1, i32 17, i32 2, i32 18, i32 3, i32 19, i32 8, i32 24, i32 9, i32 25, i32 10, i32 26, i32 11, i32 27>
5067 ret <16 x i16> %3
5068}
5069
Simon Pilgrim946f08c2017-05-06 13:46:09 +00005070define <4 x i64> @test_pxor(<4 x i64> %a0, <4 x i64> %a1, <4 x i64> *%a2) {
Simon Pilgrim84846982017-08-01 15:14:35 +00005071; GENERIC-LABEL: test_pxor:
5072; GENERIC: # BB#0:
5073; GENERIC-NEXT: vpxor %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
5074; GENERIC-NEXT: vpxor (%rdi), %ymm0, %ymm0 # sched: [5:1.00]
5075; GENERIC-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
5076; GENERIC-NEXT: retq # sched: [1:1.00]
5077;
Simon Pilgrim946f08c2017-05-06 13:46:09 +00005078; HASWELL-LABEL: test_pxor:
5079; HASWELL: # BB#0:
5080; HASWELL-NEXT: vpxor %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
Gadi Haberd76f7b82017-08-28 10:04:16 +00005081; HASWELL-NEXT: vpxor (%rdi), %ymm0, %ymm0 # sched: [1:0.50]
Simon Pilgrim946f08c2017-05-06 13:46:09 +00005082; HASWELL-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
Gadi Haberd76f7b82017-08-28 10:04:16 +00005083; HASWELL-NEXT: retq # sched: [2:1.00]
Simon Pilgrim946f08c2017-05-06 13:46:09 +00005084;
Gadi Haber767d98b2017-08-30 08:08:50 +00005085; SKYLAKE-LABEL: test_pxor:
5086; SKYLAKE: # BB#0:
5087; SKYLAKE-NEXT: vpxor %ymm1, %ymm0, %ymm0 # sched: [1:0.33]
5088; SKYLAKE-NEXT: vpxor (%rdi), %ymm0, %ymm0 # sched: [1:0.50]
5089; SKYLAKE-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
5090; SKYLAKE-NEXT: retq # sched: [2:1.00]
5091;
Simon Pilgrim946f08c2017-05-06 13:46:09 +00005092; ZNVER1-LABEL: test_pxor:
5093; ZNVER1: # BB#0:
Craig Topper106b5b62017-07-19 02:45:14 +00005094; ZNVER1-NEXT: vpxor %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
5095; ZNVER1-NEXT: vpxor (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
5096; ZNVER1-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
Ashutosh Nemabfcac0b2017-08-31 12:38:35 +00005097; ZNVER1-NEXT: retq # sched: [1:0.50]
Simon Pilgrim946f08c2017-05-06 13:46:09 +00005098 %1 = xor <4 x i64> %a0, %a1
5099 %2 = load <4 x i64>, <4 x i64> *%a2, align 32
5100 %3 = xor <4 x i64> %1, %2
5101 %4 = add <4 x i64> %3, %a1
5102 ret <4 x i64> %4
5103}
5104
5105!0 = !{i32 1}