; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=SSE,SSE2
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+ssse3 | FileCheck %s --check-prefixes=SSE,SSSE3
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefixes=SSE,SSE41
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=AVX,AVX1
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX2

;
; Partial Vector Loads - PR16739
;
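; Three consecutive scalar float loads built into a <4 x float>; since the
; pointer is dereferenceable(16), the checks below show them combined into a
; single 16-byte movups/vmovups.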
define <4 x float> @load_float4_float3(<4 x float>* nocapture readonly dereferenceable(16)) {
; SSE-LABEL: load_float4_float3:
; SSE:       # %bb.0:
; SSE-NEXT:    movups (%rdi), %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: load_float4_float3:
; AVX:       # %bb.0:
; AVX-NEXT:    vmovups (%rdi), %xmm0
; AVX-NEXT:    retq
  %p0 = getelementptr inbounds <4 x float>, <4 x float>* %0, i64 0, i64 0
  %p1 = getelementptr inbounds <4 x float>, <4 x float>* %0, i64 0, i64 1
  %p2 = getelementptr inbounds <4 x float>, <4 x float>* %0, i64 0, i64 2
  %ld0 = load float, float* %p0, align 4
  %ld1 = load float, float* %p1, align 4
  %ld2 = load float, float* %p2, align 4
  %r0 = insertelement <4 x float> undef, float %ld0, i32 0
  %r1 = insertelement <4 x float> %r0, float %ld1, i32 1
  %r2 = insertelement <4 x float> %r1, float %ld2, i32 2
  ret <4 x float> %r2
}

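; Same three scalar loads, but inserted into an <8 x float>; only elements 0-2
; are defined, so a single 16-byte xmm load is still sufficient.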
define <8 x float> @load_float8_float3(<4 x float>* nocapture readonly dereferenceable(16)) {
; SSE-LABEL: load_float8_float3:
; SSE:       # %bb.0:
; SSE-NEXT:    movups (%rdi), %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: load_float8_float3:
; AVX:       # %bb.0:
; AVX-NEXT:    vmovups (%rdi), %xmm0
; AVX-NEXT:    retq
  %p0 = getelementptr inbounds <4 x float>, <4 x float>* %0, i64 0, i64 0
  %p1 = getelementptr inbounds <4 x float>, <4 x float>* %0, i64 0, i64 1
  %p2 = getelementptr inbounds <4 x float>, <4 x float>* %0, i64 0, i64 2
  %ld0 = load float, float* %p0, align 4
  %ld1 = load float, float* %p1, align 4
  %ld2 = load float, float* %p2, align 4
  %r0 = insertelement <8 x float> undef, float %ld0, i32 0
  %r1 = insertelement <8 x float> %r0, float %ld1, i32 1
  %r2 = insertelement <8 x float> %r1, float %ld2, i32 2
  ret <8 x float> %r2
}

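; The same 12 bytes loaded as a <2 x float> plus a separate scalar float; the
; checks show the loads are currently kept separate and recombined with
; shuffles (insertps on SSE4.1/AVX) rather than widened to one 16-byte load.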
define <4 x float> @load_float4_float3_as_float2_float(<4 x float>* nocapture readonly dereferenceable(16)) {
; SSE2-LABEL: load_float4_float3_as_float2_float:
; SSE2:       # %bb.0:
; SSE2-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; SSE2-NEXT:    movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; SSE2-NEXT:    shufps {{.*#+}} xmm1 = xmm1[0,0],xmm0[3,0]
; SSE2-NEXT:    shufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0,2]
; SSE2-NEXT:    retq
;
; SSSE3-LABEL: load_float4_float3_as_float2_float:
; SSSE3:       # %bb.0:
; SSSE3-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; SSSE3-NEXT:    movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; SSSE3-NEXT:    shufps {{.*#+}} xmm1 = xmm1[0,0],xmm0[3,0]
; SSSE3-NEXT:    shufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0,2]
; SSSE3-NEXT:    retq
;
; SSE41-LABEL: load_float4_float3_as_float2_float:
; SSE41:       # %bb.0:
; SSE41-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; SSE41-NEXT:    insertps {{.*#+}} xmm0 = xmm0[0,1],mem[0],xmm0[3]
; SSE41-NEXT:    retq
;
; AVX-LABEL: load_float4_float3_as_float2_float:
; AVX:       # %bb.0:
; AVX-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0,1],mem[0],xmm0[3]
; AVX-NEXT:    retq
  %2 = bitcast <4 x float>* %0 to <2 x float>*
  %3 = load <2 x float>, <2 x float>* %2, align 4
  %4 = extractelement <2 x float> %3, i32 0
  %5 = insertelement <4 x float> undef, float %4, i32 0
  %6 = extractelement <2 x float> %3, i32 1
  %7 = insertelement <4 x float> %5, float %6, i32 1
  %8 = getelementptr inbounds <4 x float>, <4 x float>* %0, i64 0, i64 2
  %9 = load float, float* %8, align 4
  %10 = insertelement <4 x float> %7, float %9, i32 2
  ret <4 x float> %10
}

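; The same pattern expressed as two i64 loads split back into floats via
; trunc/lshr/bitcast; the checks show this is still lowered as separate scalar
; loads rather than a single 16-byte load.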
define <4 x float> @load_float4_float3_trunc(<4 x float>* nocapture readonly dereferenceable(16)) {
; SSE2-LABEL: load_float4_float3_trunc:
; SSE2:       # %bb.0:
; SSE2-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE2-NEXT:    movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; SSE2-NEXT:    unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE2-NEXT:    movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; SSE2-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; SSE2-NEXT:    retq
;
; SSSE3-LABEL: load_float4_float3_trunc:
; SSSE3:       # %bb.0:
; SSSE3-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSSE3-NEXT:    movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; SSSE3-NEXT:    unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSSE3-NEXT:    movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; SSSE3-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; SSSE3-NEXT:    retq
;
; SSE41-LABEL: load_float4_float3_trunc:
; SSE41:       # %bb.0:
; SSE41-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE41-NEXT:    insertps {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[2,3]
; SSE41-NEXT:    insertps {{.*#+}} xmm0 = xmm0[0,1],mem[0],xmm0[3]
; SSE41-NEXT:    retq
;
; AVX-LABEL: load_float4_float3_trunc:
; AVX:       # %bb.0:
; AVX-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[2,3]
; AVX-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0,1],mem[0],xmm0[3]
; AVX-NEXT:    retq
  %2 = bitcast <4 x float>* %0 to i64*
  %3 = load i64, i64* %2, align 16
  %4 = getelementptr inbounds <4 x float>, <4 x float>* %0, i64 0, i64 2
  %5 = bitcast float* %4 to i64*
  %6 = load i64, i64* %5, align 8
  %7 = trunc i64 %3 to i32
  %8 = bitcast i32 %7 to float
  %9 = insertelement <4 x float> undef, float %8, i32 0
  %10 = lshr i64 %3, 32
  %11 = trunc i64 %10 to i32
  %12 = bitcast i32 %11 to float
  %13 = insertelement <4 x float> %9, float %12, i32 1
  %14 = trunc i64 %6 to i32
  %15 = bitcast i32 %14 to float
  %16 = insertelement <4 x float> %13, float %15, i32 2
  ret <4 x float> %16
}

; PR21780
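; Loads doubles 0 and 2 and splats each into adjacent lanes of a <4 x double>;
; with AVX the checks show a single 256-bit vmovddup from memory, while SSE
; needs two 128-bit splat loads (movsd+movlhps without SSE3).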
define <4 x double> @load_double4_0u2u(double* nocapture readonly dereferenceable(32)) {
; SSE2-LABEL: load_double4_0u2u:
; SSE2:       # %bb.0:
; SSE2-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; SSE2-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
; SSE2-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0,0]
; SSE2-NEXT:    movlhps {{.*#+}} xmm1 = xmm1[0,0]
; SSE2-NEXT:    retq
;
; SSSE3-LABEL: load_double4_0u2u:
; SSSE3:       # %bb.0:
; SSSE3-NEXT:    movddup {{.*#+}} xmm0 = mem[0,0]
; SSSE3-NEXT:    movddup {{.*#+}} xmm1 = mem[0,0]
; SSSE3-NEXT:    retq
;
; SSE41-LABEL: load_double4_0u2u:
; SSE41:       # %bb.0:
; SSE41-NEXT:    movddup {{.*#+}} xmm0 = mem[0,0]
; SSE41-NEXT:    movddup {{.*#+}} xmm1 = mem[0,0]
; SSE41-NEXT:    retq
;
; AVX-LABEL: load_double4_0u2u:
; AVX:       # %bb.0:
; AVX-NEXT:    vmovddup {{.*#+}} ymm0 = mem[0,0,2,2]
; AVX-NEXT:    retq
  %2 = load double, double* %0, align 8
  %3 = insertelement <4 x double> undef, double %2, i32 0
  %4 = getelementptr inbounds double, double* %0, i64 2
  %5 = load double, double* %4, align 8
  %6 = insertelement <4 x double> %3, double %5, i32 2
  %7 = shufflevector <4 x double> %6, <4 x double> undef, <4 x i32> <i32 0, i32 0, i32 2, i32 2>
  ret <4 x double> %7
}