; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=x86_64-apple-darwin -mattr=avx < %s | FileCheck %s --check-prefix=AVX --check-prefix=AVX1
; RUN: llc -mtriple=x86_64-apple-darwin -mattr=avx2 < %s | FileCheck %s --check-prefix=AVX --check-prefix=AVX2
; RUN: llc -mtriple=x86_64-apple-darwin -mattr=avx512f < %s | FileCheck %s --check-prefix=AVX512 --check-prefix=AVX512F
; RUN: llc -mtriple=x86_64-apple-darwin -mattr=avx512f,avx512bw,avx512vl < %s | FileCheck %s --check-prefix=AVX512 --check-prefix=SKX

; To test the case where a masked store is not legal, we should add a run with a
; target that does not have AVX, but that should probably be a separate test file
; with fewer tests, because it takes over 1.2 seconds to codegen these tests on a
; 4GHz Haswell when there is no maskmov.
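; A rough sketch of what such a run line could look like (kept inactive so lit
; does not execute it; the sse4.2 attribute and SSE prefix are assumptions):
;   llc -mtriple=x86_64-apple-darwin -mattr=sse4.2 < %s | FileCheck %s --check-prefix=SSE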

define void @store_v1i32_v1i32(<1 x i32> %trigger, <1 x i32>* %addr, <1 x i32> %val) {
; AVX-LABEL: store_v1i32_v1i32:
; AVX: ## %bb.0:
; AVX-NEXT: testl %edi, %edi
; AVX-NEXT: jne LBB0_2
; AVX-NEXT: ## %bb.1: ## %cond.store
; AVX-NEXT: movl %edx, (%rsi)
; AVX-NEXT: LBB0_2: ## %else
; AVX-NEXT: retq
;
; AVX512-LABEL: store_v1i32_v1i32:
; AVX512: ## %bb.0:
; AVX512-NEXT: testl %edi, %edi
; AVX512-NEXT: jne LBB0_2
; AVX512-NEXT: ## %bb.1: ## %cond.store
; AVX512-NEXT: movl %edx, (%rsi)
; AVX512-NEXT: LBB0_2: ## %else
; AVX512-NEXT: retq
  %mask = icmp eq <1 x i32> %trigger, zeroinitializer
  call void @llvm.masked.store.v1i32.p0v1i32(<1 x i32>%val, <1 x i32>* %addr, i32 4, <1 x i1>%mask)
  ret void
}
declare void @llvm.masked.store.v1i32.p0v1i32(<1 x i32>, <1 x i32>*, i32, <1 x i1>)

define void @store_v4i32_v4i32(<4 x i32> %trigger, <4 x i32>* %addr, <4 x i32> %val) {
; AVX1-LABEL: store_v4i32_v4i32:
; AVX1: ## %bb.0:
; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX1-NEXT: vpcmpeqd %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vmaskmovps %xmm1, %xmm0, (%rdi)
; AVX1-NEXT: retq
;
; AVX2-LABEL: store_v4i32_v4i32:
; AVX2: ## %bb.0:
; AVX2-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX2-NEXT: vpcmpeqd %xmm2, %xmm0, %xmm0
; AVX2-NEXT: vpmaskmovd %xmm1, %xmm0, (%rdi)
; AVX2-NEXT: retq
;
; AVX512F-LABEL: store_v4i32_v4i32:
; AVX512F: ## %bb.0:
; AVX512F-NEXT: ## kill: def $xmm1 killed $xmm1 def $zmm1
; AVX512F-NEXT: ## kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512F-NEXT: vptestnmd %zmm0, %zmm0, %k0
; AVX512F-NEXT: kshiftlw $12, %k0, %k0
; AVX512F-NEXT: kshiftrw $12, %k0, %k1
; AVX512F-NEXT: vmovdqu32 %zmm1, (%rdi) {%k1}
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; SKX-LABEL: store_v4i32_v4i32:
; SKX: ## %bb.0:
; SKX-NEXT: vptestnmd %xmm0, %xmm0, %k1
; SKX-NEXT: vmovdqu32 %xmm1, (%rdi) {%k1}
; SKX-NEXT: retq
  %mask = icmp eq <4 x i32> %trigger, zeroinitializer
  call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32>%val, <4 x i32>* %addr, i32 4, <4 x i1>%mask)
  ret void
}

define void @store_v8i32_v8i32(<8 x i32> %trigger, <8 x i32>* %addr, <8 x i32> %val) {
; AVX1-LABEL: store_v8i32_v8i32:
; AVX1: ## %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vpxor %xmm3, %xmm3, %xmm3
; AVX1-NEXT: vpcmpeqd %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vpcmpeqd %xmm3, %xmm0, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX1-NEXT: vmaskmovps %ymm1, %ymm0, (%rdi)
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2-LABEL: store_v8i32_v8i32:
; AVX2: ## %bb.0:
; AVX2-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX2-NEXT: vpcmpeqd %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vpmaskmovd %ymm1, %ymm0, (%rdi)
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512F-LABEL: store_v8i32_v8i32:
; AVX512F: ## %bb.0:
; AVX512F-NEXT: ## kill: def $ymm1 killed $ymm1 def $zmm1
; AVX512F-NEXT: ## kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512F-NEXT: vptestnmd %zmm0, %zmm0, %k0
; AVX512F-NEXT: kshiftlw $8, %k0, %k0
; AVX512F-NEXT: kshiftrw $8, %k0, %k1
; AVX512F-NEXT: vmovdqu32 %zmm1, (%rdi) {%k1}
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; SKX-LABEL: store_v8i32_v8i32:
; SKX: ## %bb.0:
; SKX-NEXT: vptestnmd %ymm0, %ymm0, %k1
; SKX-NEXT: vmovdqu32 %ymm1, (%rdi) {%k1}
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
  %mask = icmp eq <8 x i32> %trigger, zeroinitializer
  call void @llvm.masked.store.v8i32.p0v8i32(<8 x i32>%val, <8 x i32>* %addr, i32 4, <8 x i1>%mask)
  ret void
}

define void @store_v2f32_v2i32(<2 x i32> %trigger, <2 x float>* %addr, <2 x float> %val) {
; AVX1-LABEL: store_v2f32_v2i32:
; AVX1: ## %bb.0:
; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
; AVX1-NEXT: vpcmpeqq %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,2],zero,zero
; AVX1-NEXT: vmaskmovps %xmm1, %xmm0, (%rdi)
; AVX1-NEXT: retq
;
; AVX2-LABEL: store_v2f32_v2i32:
; AVX2: ## %bb.0:
; AVX2-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3]
; AVX2-NEXT: vpcmpeqq %xmm2, %xmm0, %xmm0
; AVX2-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,2],zero,zero
; AVX2-NEXT: vmaskmovps %xmm1, %xmm0, (%rdi)
; AVX2-NEXT: retq
;
; AVX512F-LABEL: store_v2f32_v2i32:
; AVX512F: ## %bb.0:
; AVX512F-NEXT: ## kill: def $xmm1 killed $xmm1 def $zmm1
; AVX512F-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX512F-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3]
; AVX512F-NEXT: vptestnmq %zmm0, %zmm0, %k0
; AVX512F-NEXT: kshiftlw $14, %k0, %k0
; AVX512F-NEXT: kshiftrw $14, %k0, %k1
; AVX512F-NEXT: vmovups %zmm1, (%rdi) {%k1}
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; SKX-LABEL: store_v2f32_v2i32:
; SKX: ## %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2
; SKX-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3]
; SKX-NEXT: vptestnmq %xmm0, %xmm0, %k1
; SKX-NEXT: vmovups %xmm1, (%rdi) {%k1}
; SKX-NEXT: retq
  %mask = icmp eq <2 x i32> %trigger, zeroinitializer
  call void @llvm.masked.store.v2f32.p0v2f32(<2 x float>%val, <2 x float>* %addr, i32 4, <2 x i1>%mask)
  ret void
}

define void @store_v2i32_v2i32(<2 x i32> %trigger, <2 x i32>* %addr, <2 x i32> %val) {
; AVX1-LABEL: store_v2i32_v2i32:
; AVX1: ## %bb.0:
; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
; AVX1-NEXT: vpcmpeqq %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,2],zero,zero
; AVX1-NEXT: vpermilps {{.*#+}} xmm1 = xmm1[0,2,2,3]
; AVX1-NEXT: vmaskmovps %xmm1, %xmm0, (%rdi)
; AVX1-NEXT: retq
;
; AVX2-LABEL: store_v2i32_v2i32:
; AVX2: ## %bb.0:
; AVX2-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3]
; AVX2-NEXT: vpcmpeqq %xmm2, %xmm0, %xmm0
; AVX2-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,2],zero,zero
; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; AVX2-NEXT: vpmaskmovd %xmm1, %xmm0, (%rdi)
; AVX2-NEXT: retq
;
; AVX512F-LABEL: store_v2i32_v2i32:
; AVX512F: ## %bb.0:
; AVX512F-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX512F-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3]
; AVX512F-NEXT: vptestnmq %zmm0, %zmm0, %k0
; AVX512F-NEXT: vpshufd {{.*#+}} xmm0 = xmm1[0,2,2,3]
; AVX512F-NEXT: kshiftlw $14, %k0, %k0
; AVX512F-NEXT: kshiftrw $14, %k0, %k1
; AVX512F-NEXT: vmovdqu32 %zmm0, (%rdi) {%k1}
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; SKX-LABEL: store_v2i32_v2i32:
; SKX: ## %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2
; SKX-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3]
; SKX-NEXT: vptestnmq %xmm0, %xmm0, %k1
; SKX-NEXT: vpmovqd %xmm1, (%rdi) {%k1}
; SKX-NEXT: retq
  %mask = icmp eq <2 x i32> %trigger, zeroinitializer
  call void @llvm.masked.store.v2i32.p0v2i32(<2 x i32>%val, <2 x i32>* %addr, i32 4, <2 x i1>%mask)
  ret void
}

define void @const_store_v4i32_v4i32(<4 x i32> %trigger, <4 x i32>* %addr, <4 x i32> %val) {
; AVX1-LABEL: const_store_v4i32_v4i32:
; AVX1: ## %bb.0:
; AVX1-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0
; AVX1-NEXT: vmaskmovps %xmm1, %xmm0, (%rdi)
; AVX1-NEXT: retq
;
; AVX2-LABEL: const_store_v4i32_v4i32:
; AVX2: ## %bb.0:
; AVX2-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0
; AVX2-NEXT: vpmaskmovd %xmm1, %xmm0, (%rdi)
; AVX2-NEXT: retq
;
; AVX512F-LABEL: const_store_v4i32_v4i32:
; AVX512F: ## %bb.0:
; AVX512F-NEXT: ## kill: def $xmm1 killed $xmm1 def $zmm1
; AVX512F-NEXT: movw $15, %ax
; AVX512F-NEXT: kmovw %eax, %k1
; AVX512F-NEXT: vmovdqu32 %zmm1, (%rdi) {%k1}
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; SKX-LABEL: const_store_v4i32_v4i32:
; SKX: ## %bb.0:
; SKX-NEXT: kxnorw %k0, %k0, %k1
; SKX-NEXT: vmovdqu32 %xmm1, (%rdi) {%k1}
; SKX-NEXT: retq
  %mask = icmp eq <4 x i32> %trigger, zeroinitializer
  call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32>%val, <4 x i32>* %addr, i32 4, <4 x i1><i1 true, i1 true, i1 true, i1 true>)
  ret void
}

; When only one element of the mask is set, reduce to a scalar store.
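; For example, the constant mask <i1 true, i1 false, i1 false, i1 false> below
; selects only element 0, so the masked store collapses to a plain 4-byte vmovss.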

define void @one_mask_bit_set1(<4 x i32>* %addr, <4 x i32> %val) {
; AVX-LABEL: one_mask_bit_set1:
; AVX: ## %bb.0:
; AVX-NEXT: vmovss %xmm0, (%rdi)
; AVX-NEXT: retq
;
; AVX512-LABEL: one_mask_bit_set1:
; AVX512: ## %bb.0:
; AVX512-NEXT: vmovss %xmm0, (%rdi)
; AVX512-NEXT: retq
  call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %val, <4 x i32>* %addr, i32 4, <4 x i1><i1 true, i1 false, i1 false, i1 false>)
  ret void
}

; Choose a different element to show that the correct address offset is produced.
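; Element 2 of <4 x float> lives at byte offset 2 * 4 = 8, so the scalar store
; below must target 8(%rdi) rather than (%rdi).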

define void @one_mask_bit_set2(<4 x float>* %addr, <4 x float> %val) {
; AVX-LABEL: one_mask_bit_set2:
; AVX: ## %bb.0:
; AVX-NEXT: vextractps $2, %xmm0, 8(%rdi)
; AVX-NEXT: retq
;
; AVX512-LABEL: one_mask_bit_set2:
; AVX512: ## %bb.0:
; AVX512-NEXT: vextractps $2, %xmm0, 8(%rdi)
; AVX512-NEXT: retq
  call void @llvm.masked.store.v4f32.p0v4f32(<4 x float> %val, <4 x float>* %addr, i32 4, <4 x i1><i1 false, i1 false, i1 true, i1 false>)
  ret void
}

; Choose a different scalar type and a high element of a 256-bit vector because AVX doesn't support those evenly.
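; Element 2 of <4 x i64> is at byte offset 16, in the upper 128-bit half of the
; ymm register, so it is moved down with vextractf128 before the scalar store.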

define void @one_mask_bit_set3(<4 x i64>* %addr, <4 x i64> %val) {
; AVX-LABEL: one_mask_bit_set3:
; AVX: ## %bb.0:
; AVX-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX-NEXT: vmovlps %xmm0, 16(%rdi)
; AVX-NEXT: vzeroupper
; AVX-NEXT: retq
;
; AVX512-LABEL: one_mask_bit_set3:
; AVX512: ## %bb.0:
; AVX512-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX512-NEXT: vmovlps %xmm0, 16(%rdi)
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
  call void @llvm.masked.store.v4i64.p0v4i64(<4 x i64> %val, <4 x i64>* %addr, i32 4, <4 x i1><i1 false, i1 false, i1 true, i1 false>)
  ret void
}

; Choose a different scalar type and a high element of a 256-bit vector because AVX doesn't support those evenly.
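; Element 3 of <4 x double> is the high 64 bits of the upper 128-bit lane, at
; byte offset 3 * 8 = 24, hence the vextractf128 + vmovhpd pair below.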

define void @one_mask_bit_set4(<4 x double>* %addr, <4 x double> %val) {
; AVX-LABEL: one_mask_bit_set4:
; AVX: ## %bb.0:
; AVX-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX-NEXT: vmovhpd %xmm0, 24(%rdi)
; AVX-NEXT: vzeroupper
; AVX-NEXT: retq
;
; AVX512-LABEL: one_mask_bit_set4:
; AVX512: ## %bb.0:
; AVX512-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX512-NEXT: vmovhpd %xmm0, 24(%rdi)
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
  call void @llvm.masked.store.v4f64.p0v4f64(<4 x double> %val, <4 x double>* %addr, i32 4, <4 x i1><i1 false, i1 false, i1 false, i1 true>)
  ret void
}

; Try a 512-bit vector to make sure AVX doesn't die and AVX512 works as expected.
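; Element 6 of <8 x double> is at byte offset 6 * 8 = 48. Without AVX512 the
; vector is split across ymm0/ymm1, so the element comes from the upper half of ymm1.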

define void @one_mask_bit_set5(<8 x double>* %addr, <8 x double> %val) {
; AVX-LABEL: one_mask_bit_set5:
; AVX: ## %bb.0:
; AVX-NEXT: vextractf128 $1, %ymm1, %xmm0
; AVX-NEXT: vmovlps %xmm0, 48(%rdi)
; AVX-NEXT: vzeroupper
; AVX-NEXT: retq
;
; AVX512-LABEL: one_mask_bit_set5:
; AVX512: ## %bb.0:
; AVX512-NEXT: vextractf32x4 $3, %zmm0, %xmm0
; AVX512-NEXT: vmovlps %xmm0, 48(%rdi)
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
  call void @llvm.masked.store.v8f64.p0v8f64(<8 x double> %val, <8 x double>* %addr, i32 4, <8 x i1><i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 true, i1 false>)
  ret void
}

; The mask bit for each data element is the most significant bit of the mask operand, so a compare isn't needed.
; FIXME: The AVX512 code should be improved to use 'vpmovd2m'. Add tests for 512-bit vectors when implementing that.
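; For reference: 'icmp slt %mask, zeroinitializer' depends only on the sign bit
; of each element, and that is exactly the bit the AVX maskmov instructions
; test, so the compare folds away and %xmm2 is used as the mask directly.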

define void @trunc_mask(<4 x float> %x, <4 x float>* %ptr, <4 x float> %y, <4 x i32> %mask) {
; AVX-LABEL: trunc_mask:
; AVX: ## %bb.0:
; AVX-NEXT: vmaskmovps %xmm0, %xmm2, (%rdi)
; AVX-NEXT: retq
;
; AVX512F-LABEL: trunc_mask:
; AVX512F: ## %bb.0:
; AVX512F-NEXT: ## kill: def $xmm2 killed $xmm2 def $zmm2
; AVX512F-NEXT: ## kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512F-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX512F-NEXT: vpcmpgtd %zmm2, %zmm1, %k0
; AVX512F-NEXT: kshiftlw $12, %k0, %k0
; AVX512F-NEXT: kshiftrw $12, %k0, %k1
; AVX512F-NEXT: vmovups %zmm0, (%rdi) {%k1}
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; SKX-LABEL: trunc_mask:
; SKX: ## %bb.0:
; SKX-NEXT: vpxor %xmm1, %xmm1, %xmm1
; SKX-NEXT: vpcmpgtd %xmm2, %xmm1, %k1
; SKX-NEXT: vmovups %xmm0, (%rdi) {%k1}
; SKX-NEXT: retq
  %bool_mask = icmp slt <4 x i32> %mask, zeroinitializer
  call void @llvm.masked.store.v4f32.p0v4f32(<4 x float> %x, <4 x float>* %ptr, i32 1, <4 x i1> %bool_mask)
  ret void
}

; SimplifyDemandedBits eliminates an ashr here.
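; A sext-in-reg of the low bit would normally be a shift pair (vpslld $31 then
; an arithmetic shift right), but the masked store only demands the sign bit of
; each lane, so only the 'vpslld $31' survives below.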

define void @masked_store_bool_mask_demand_trunc_sext(<4 x double> %x, <4 x double>* %p, <4 x i32> %masksrc) {
; AVX1-LABEL: masked_store_bool_mask_demand_trunc_sext:
; AVX1: ## %bb.0:
; AVX1-NEXT: vpslld $31, %xmm1, %xmm1
; AVX1-NEXT: vpmovsxdq %xmm1, %xmm2
; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
; AVX1-NEXT: vpmovsxdq %xmm1, %xmm1
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm1
; AVX1-NEXT: vmaskmovpd %ymm0, %ymm1, (%rdi)
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2-LABEL: masked_store_bool_mask_demand_trunc_sext:
; AVX2: ## %bb.0:
; AVX2-NEXT: vpslld $31, %xmm1, %xmm1
; AVX2-NEXT: vpmovsxdq %xmm1, %ymm1
; AVX2-NEXT: vmaskmovpd %ymm0, %ymm1, (%rdi)
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512F-LABEL: masked_store_bool_mask_demand_trunc_sext:
; AVX512F: ## %bb.0:
; AVX512F-NEXT: ## kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512F-NEXT: vpslld $31, %xmm1, %xmm1
; AVX512F-NEXT: vptestmd %zmm1, %zmm1, %k0
; AVX512F-NEXT: kshiftlw $12, %k0, %k0
; AVX512F-NEXT: kshiftrw $12, %k0, %k1
; AVX512F-NEXT: vmovupd %zmm0, (%rdi) {%k1}
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; SKX-LABEL: masked_store_bool_mask_demand_trunc_sext:
; SKX: ## %bb.0:
; SKX-NEXT: vpslld $31, %xmm1, %xmm1
; SKX-NEXT: vptestmd %xmm1, %xmm1, %k1
; SKX-NEXT: vmovupd %ymm0, (%rdi) {%k1}
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
  %sext = sext <4 x i32> %masksrc to <4 x i64>
  %boolmask = trunc <4 x i64> %sext to <4 x i1>
  call void @llvm.masked.store.v4f64.p0v4f64(<4 x double> %x, <4 x double>* %p, i32 4, <4 x i1> %boolmask)
  ret void
}

; This needs to be widened to v4i32.
; This used to assert in type legalization. PR38436
; FIXME: The codegen for AVX512 should use KSHIFT to zero the upper bits of the mask.
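; The current AVX512 lowering materializes the <3 x i1> mask as a vector, zeroes
; the unused fourth lane with a blend, and re-tests it into a k-register; the
; FIXME above notes that KSHIFT could zero the upper bits instead.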
define void @widen_masked_store(<3 x i32> %v, <3 x i32>* %p, <3 x i1> %mask) {
; AVX1-LABEL: widen_masked_store:
; AVX1: ## %bb.0:
; AVX1-NEXT: vmovd %edx, %xmm1
; AVX1-NEXT: vmovd %esi, %xmm2
; AVX1-NEXT: vpunpckldq {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
; AVX1-NEXT: vmovd %ecx, %xmm2
; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
; AVX1-NEXT: vpslld $31, %xmm1, %xmm1
; AVX1-NEXT: vmaskmovps %xmm0, %xmm1, (%rdi)
; AVX1-NEXT: retq
;
; AVX2-LABEL: widen_masked_store:
; AVX2: ## %bb.0:
; AVX2-NEXT: vmovd %edx, %xmm1
; AVX2-NEXT: vmovd %esi, %xmm2
; AVX2-NEXT: vpunpckldq {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
; AVX2-NEXT: vmovd %ecx, %xmm2
; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
; AVX2-NEXT: vpslld $31, %xmm1, %xmm1
; AVX2-NEXT: vpmaskmovd %xmm0, %xmm1, (%rdi)
; AVX2-NEXT: retq
;
; AVX512F-LABEL: widen_masked_store:
; AVX512F: ## %bb.0:
; AVX512F-NEXT: ## kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512F-NEXT: vpslld $31, %xmm1, %xmm1
; AVX512F-NEXT: vptestmd %zmm1, %zmm1, %k1
; AVX512F-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
; AVX512F-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX512F-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0,1,2],xmm2[3]
; AVX512F-NEXT: vptestmd %zmm1, %zmm1, %k0
; AVX512F-NEXT: kshiftlw $12, %k0, %k0
; AVX512F-NEXT: kshiftrw $12, %k0, %k1
; AVX512F-NEXT: vmovdqu32 %zmm0, (%rdi) {%k1}
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; SKX-LABEL: widen_masked_store:
; SKX: ## %bb.0:
; SKX-NEXT: vpslld $31, %xmm1, %xmm1
; SKX-NEXT: vptestmd %xmm1, %xmm1, %k1
; SKX-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
; SKX-NEXT: vmovdqa32 %xmm1, %xmm1 {%k1} {z}
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2
; SKX-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0,1,2],xmm2[3]
; SKX-NEXT: vptestmd %xmm1, %xmm1, %k1
; SKX-NEXT: vmovdqa32 %xmm0, (%rdi) {%k1}
; SKX-NEXT: retq
  call void @llvm.masked.store.v3i32(<3 x i32> %v, <3 x i32>* %p, i32 16, <3 x i1> %mask)
  ret void
}
declare void @llvm.masked.store.v3i32(<3 x i32>, <3 x i32>*, i32, <3 x i1>)

declare void @llvm.masked.store.v8i32.p0v8i32(<8 x i32>, <8 x i32>*, i32, <8 x i1>)
declare void @llvm.masked.store.v4i32.p0v4i32(<4 x i32>, <4 x i32>*, i32, <4 x i1>)
declare void @llvm.masked.store.v4i64.p0v4i64(<4 x i64>, <4 x i64>*, i32, <4 x i1>)
declare void @llvm.masked.store.v2f32.p0v2f32(<2 x float>, <2 x float>*, i32, <2 x i1>)
declare void @llvm.masked.store.v2i32.p0v2i32(<2 x i32>, <2 x i32>*, i32, <2 x i1>)
declare void @llvm.masked.store.v4f32.p0v4f32(<4 x float>, <4 x float>*, i32, <4 x i1>)
declare void @llvm.masked.store.v8f64.p0v8f64(<8 x double>, <8 x double>*, i32, <8 x i1>)
declare void @llvm.masked.store.v4f64.p0v4f64(<4 x double>, <4 x double>*, i32, <4 x i1>)
declare void @llvm.masked.store.v2f64.p0v2f64(<2 x double>, <2 x double>*, i32, <2 x i1>)
declare void @llvm.masked.store.v2i64.p0v2i64(<2 x i64>, <2 x i64>*, i32, <2 x i1>)