; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s

; PR12281
; Test generation of code for vmull instruction when multiplying 128-bit
; vectors that were created by sign-extending smaller vector sizes.
;
; The vmull operation requires 64-bit vectors, so we must extend the original
; vector size to 64 bits for vmull operation.
; Previously failed with an assertion because the <4 x i8> vector was too small
; for vmull.

; Vector x Constant
; v4i8
;
; Multiply a sign-extended <4 x i8> load by a constant splat; should select vmull.
define void @sextload_v4i8_c(<4 x i8>* %v) nounwind {
;CHECK: sextload_v4i8_c:
entry:
  %0 = load <4 x i8>* %v, align 8
  %v0 = sext <4 x i8> %0 to <4 x i32>
;CHECK: vmull
  %v1 = mul <4 x i32> %v0, <i32 3, i32 3, i32 3, i32 3>
  store <4 x i32> %v1, <4 x i32>* undef, align 8
  ret void;
}

; v2i8
;
; Multiply a sign-extended <2 x i8> load by a constant splat; should select vmull.
define void @sextload_v2i8_c(<2 x i8>* %v) nounwind {
;CHECK: sextload_v2i8_c:
entry:
  %0 = load <2 x i8>* %v, align 8
  %v0 = sext <2 x i8> %0 to <2 x i64>
;CHECK: vmull
  %v1 = mul <2 x i64> %v0, <i64 3, i64 3>
  store <2 x i64> %v1, <2 x i64>* undef, align 8
  ret void;
}

; v2i16
;
; Multiply a sign-extended <2 x i16> load by a constant splat; should select vmull.
define void @sextload_v2i16_c(<2 x i16>* %v) nounwind {
;CHECK: sextload_v2i16_c:
entry:
  %0 = load <2 x i16>* %v, align 8
  %v0 = sext <2 x i16> %0 to <2 x i64>
;CHECK: vmull
  %v1 = mul <2 x i64> %v0, <i64 3, i64 3>
  store <2 x i64> %v1, <2 x i64>* undef, align 8
  ret void;
}


; Vector x Vector
; v4i8
;
; Multiply two sign-extended <4 x i8> loads of equal size; should select vmull.
define void @sextload_v4i8_v(<4 x i8>* %v, <4 x i8>* %p) nounwind {
;CHECK: sextload_v4i8_v:
entry:
  %0 = load <4 x i8>* %v, align 8
  %v0 = sext <4 x i8> %0 to <4 x i32>

  %1 = load <4 x i8>* %p, align 8
  %v2 = sext <4 x i8> %1 to <4 x i32>
;CHECK: vmull
  %v1 = mul <4 x i32> %v0, %v2
  store <4 x i32> %v1, <4 x i32>* undef, align 8
  ret void;
}

; v2i8
;
; Multiply two sign-extended <2 x i8> loads of equal size; should select vmull.
define void @sextload_v2i8_v(<2 x i8>* %v, <2 x i8>* %p) nounwind {
;CHECK: sextload_v2i8_v:
entry:
  %0 = load <2 x i8>* %v, align 8
  %v0 = sext <2 x i8> %0 to <2 x i64>

  %1 = load <2 x i8>* %p, align 8
  %v2 = sext <2 x i8> %1 to <2 x i64>
;CHECK: vmull
  %v1 = mul <2 x i64> %v0, %v2
  store <2 x i64> %v1, <2 x i64>* undef, align 8
  ret void;
}

; v2i16
;
; Multiply two sign-extended <2 x i16> loads of equal size; should select vmull.
define void @sextload_v2i16_v(<2 x i16>* %v, <2 x i16>* %p) nounwind {
;CHECK: sextload_v2i16_v:
entry:
  %0 = load <2 x i16>* %v, align 8
  %v0 = sext <2 x i16> %0 to <2 x i64>

  %1 = load <2 x i16>* %p, align 8
  %v2 = sext <2 x i16> %1 to <2 x i64>
;CHECK: vmull
  %v1 = mul <2 x i64> %v0, %v2
  store <2 x i64> %v1, <2 x i64>* undef, align 8
  ret void;
}


; Vector(small) x Vector(big)
; v4i8 x v4i16
;
; Multiply sign-extended loads of different source widths (v4i8 x v4i16);
; should still select vmull after widening.
define void @sextload_v4i8_vs(<4 x i8>* %v, <4 x i16>* %p) nounwind {
;CHECK: sextload_v4i8_vs:
entry:
  %0 = load <4 x i8>* %v, align 8
  %v0 = sext <4 x i8> %0 to <4 x i32>

  %1 = load <4 x i16>* %p, align 8
  %v2 = sext <4 x i16> %1 to <4 x i32>
;CHECK: vmull
  %v1 = mul <4 x i32> %v0, %v2
  store <4 x i32> %v1, <4 x i32>* undef, align 8
  ret void;
}

; v2i8
; v2i8 x v2i16
; Multiply sign-extended loads of different source widths (v2i8 x v2i16);
; should still select vmull after widening.
define void @sextload_v2i8_vs(<2 x i8>* %v, <2 x i16>* %p) nounwind {
;CHECK: sextload_v2i8_vs:
entry:
  %0 = load <2 x i8>* %v, align 8
  %v0 = sext <2 x i8> %0 to <2 x i64>

  %1 = load <2 x i16>* %p, align 8
  %v2 = sext <2 x i16> %1 to <2 x i64>
;CHECK: vmull
  %v1 = mul <2 x i64> %v0, %v2
  store <2 x i64> %v1, <2 x i64>* undef, align 8
  ret void;
}

; v2i16
; v2i16 x v2i32
; Multiply sign-extended loads of different source widths (v2i16 x v2i32);
; should still select vmull after widening.
define void @sextload_v2i16_vs(<2 x i16>* %v, <2 x i32>* %p) nounwind {
;CHECK: sextload_v2i16_vs:
entry:
  %0 = load <2 x i16>* %v, align 8
  %v0 = sext <2 x i16> %0 to <2 x i64>

  %1 = load <2 x i32>* %p, align 8
  %v2 = sext <2 x i32> %1 to <2 x i64>
;CHECK: vmull
  %v1 = mul <2 x i64> %v0, %v2
  store <2 x i64> %v1, <2 x i64>* undef, align 8
  ret void;
}