; RUN: llc < %s -mattr=+neon | FileCheck %s
target datalayout = "e-p:32:32:32-i1:8:32-i8:8:32-i16:16:32-i32:32:32-i64:32:32-f32:32:32-f64:32:32-v64:64:64-v128:128:128-a0:0:32"
target triple = "thumbv7-elf"

; Signed extract of lane 1 from a <8 x i8>; should select vmov.s8.
define i32 @vget_lanes8(<8 x i8>* %A) nounwind {
;CHECK: vget_lanes8:
;CHECK: vmov.s8
	%tmp1 = load <8 x i8>* %A
	%tmp2 = extractelement <8 x i8> %tmp1, i32 1
	%tmp3 = sext i8 %tmp2 to i32
	ret i32 %tmp3
}

; Signed extract of lane 1 from a <4 x i16>; should select vmov.s16.
define i32 @vget_lanes16(<4 x i16>* %A) nounwind {
;CHECK: vget_lanes16:
;CHECK: vmov.s16
	%tmp1 = load <4 x i16>* %A
	%tmp2 = extractelement <4 x i16> %tmp1, i32 1
	%tmp3 = sext i16 %tmp2 to i32
	ret i32 %tmp3
}

; Unsigned extract of lane 1 from a <8 x i8>; should select vmov.u8.
define i32 @vget_laneu8(<8 x i8>* %A) nounwind {
;CHECK: vget_laneu8:
;CHECK: vmov.u8
	%tmp1 = load <8 x i8>* %A
	%tmp2 = extractelement <8 x i8> %tmp1, i32 1
	%tmp3 = zext i8 %tmp2 to i32
	ret i32 %tmp3
}

; Unsigned extract of lane 1 from a <4 x i16>; should select vmov.u16.
define i32 @vget_laneu16(<4 x i16>* %A) nounwind {
;CHECK: vget_laneu16:
;CHECK: vmov.u16
	%tmp1 = load <4 x i16>* %A
	%tmp2 = extractelement <4 x i16> %tmp1, i32 1
	%tmp3 = zext i16 %tmp2 to i32
	ret i32 %tmp3
}

; Do a vector add to keep the extraction from being done directly from memory.
define i32 @vget_lanei32(<2 x i32>* %A) nounwind {
;CHECK: vget_lanei32:
;CHECK: vmov.32
	%tmp1 = load <2 x i32>* %A
	%tmp2 = add <2 x i32> %tmp1, %tmp1
	%tmp3 = extractelement <2 x i32> %tmp2, i32 1
	ret i32 %tmp3
}

; Signed extract of lane 1 from a Q-reg <16 x i8>; should select vmov.s8.
define i32 @vgetQ_lanes8(<16 x i8>* %A) nounwind {
;CHECK: vgetQ_lanes8:
;CHECK: vmov.s8
	%tmp1 = load <16 x i8>* %A
	%tmp2 = extractelement <16 x i8> %tmp1, i32 1
	%tmp3 = sext i8 %tmp2 to i32
	ret i32 %tmp3
}

; Signed extract of lane 1 from a Q-reg <8 x i16>; should select vmov.s16.
define i32 @vgetQ_lanes16(<8 x i16>* %A) nounwind {
;CHECK: vgetQ_lanes16:
;CHECK: vmov.s16
	%tmp1 = load <8 x i16>* %A
	%tmp2 = extractelement <8 x i16> %tmp1, i32 1
	%tmp3 = sext i16 %tmp2 to i32
	ret i32 %tmp3
}

; Unsigned extract of lane 1 from a Q-reg <16 x i8>; should select vmov.u8.
define i32 @vgetQ_laneu8(<16 x i8>* %A) nounwind {
;CHECK: vgetQ_laneu8:
;CHECK: vmov.u8
	%tmp1 = load <16 x i8>* %A
	%tmp2 = extractelement <16 x i8> %tmp1, i32 1
	%tmp3 = zext i8 %tmp2 to i32
	ret i32 %tmp3
}

; Unsigned extract of lane 1 from a Q-reg <8 x i16>; should select vmov.u16.
define i32 @vgetQ_laneu16(<8 x i16>* %A) nounwind {
;CHECK: vgetQ_laneu16:
;CHECK: vmov.u16
	%tmp1 = load <8 x i16>* %A
	%tmp2 = extractelement <8 x i16> %tmp1, i32 1
	%tmp3 = zext i16 %tmp2 to i32
	ret i32 %tmp3
}

; Do a vector add to keep the extraction from being done directly from memory.
define i32 @vgetQ_lanei32(<4 x i32>* %A) nounwind {
;CHECK: vgetQ_lanei32:
;CHECK: vmov.32
	%tmp1 = load <4 x i32>* %A
	%tmp2 = add <4 x i32> %tmp1, %tmp1
	%tmp3 = extractelement <4 x i32> %tmp2, i32 1
	ret i32 %tmp3
}

; Unsigned u16 lane extract through stack slots; should select vmov.u16.
define arm_aapcs_vfpcc void @test_vget_laneu16() nounwind {
entry:
; CHECK: vmov.u16 r0, d0[1]
	%arg0_uint16x4_t = alloca <4 x i16>             ; <<4 x i16>*> [#uses=1]
	%out_uint16_t = alloca i16                      ; <i16*> [#uses=1]
	%"alloca point" = bitcast i32 0 to i32          ; <i32> [#uses=0]
	%0 = load <4 x i16>* %arg0_uint16x4_t, align 8  ; <<4 x i16>> [#uses=1]
	%1 = extractelement <4 x i16> %0, i32 1         ; <i16> [#uses=1]
	store i16 %1, i16* %out_uint16_t, align 2
	br label %return

return:                                           ; preds = %entry
	ret void
}

; Unsigned u8 lane extract through stack slots; should select vmov.u8.
define arm_aapcs_vfpcc void @test_vget_laneu8() nounwind {
entry:
; CHECK: vmov.u8 r0, d0[1]
	%arg0_uint8x8_t = alloca <8 x i8>               ; <<8 x i8>*> [#uses=1]
	%out_uint8_t = alloca i8                        ; <i8*> [#uses=1]
	%"alloca point" = bitcast i32 0 to i32          ; <i32> [#uses=0]
	%0 = load <8 x i8>* %arg0_uint8x8_t, align 8    ; <<8 x i8>> [#uses=1]
	%1 = extractelement <8 x i8> %0, i32 1          ; <i8> [#uses=1]
	store i8 %1, i8* %out_uint8_t, align 1
	br label %return

return:                                           ; preds = %entry
	ret void
}

; Unsigned u16 lane extract from a Q-reg vector through stack slots; should select vmov.u16.
define arm_aapcs_vfpcc void @test_vgetQ_laneu16() nounwind {
entry:
; CHECK: vmov.u16 r0, d0[1]
	%arg0_uint16x8_t = alloca <8 x i16>             ; <<8 x i16>*> [#uses=1]
	%out_uint16_t = alloca i16                      ; <i16*> [#uses=1]
	%"alloca point" = bitcast i32 0 to i32          ; <i32> [#uses=0]
	%0 = load <8 x i16>* %arg0_uint16x8_t, align 16 ; <<8 x i16>> [#uses=1]
	%1 = extractelement <8 x i16> %0, i32 1         ; <i16> [#uses=1]
	store i16 %1, i16* %out_uint16_t, align 2
	br label %return

return:                                           ; preds = %entry
	ret void
}

; Unsigned u8 lane extract from a Q-reg vector through stack slots; should select vmov.u8.
define arm_aapcs_vfpcc void @test_vgetQ_laneu8() nounwind {
entry:
; CHECK: vmov.u8 r0, d0[1]
	%arg0_uint8x16_t = alloca <16 x i8>             ; <<16 x i8>*> [#uses=1]
	%out_uint8_t = alloca i8                        ; <i8*> [#uses=1]
	%"alloca point" = bitcast i32 0 to i32          ; <i32> [#uses=0]
	%0 = load <16 x i8>* %arg0_uint8x16_t, align 16 ; <<16 x i8>> [#uses=1]
	%1 = extractelement <16 x i8> %0, i32 1         ; <i8> [#uses=1]
	store i8 %1, i8* %out_uint8_t, align 1
	br label %return

return:                                           ; preds = %entry
	ret void
}

; Insert i8 into lane 1 of a <8 x i8>; should select vmov.8.
define <8 x i8> @vset_lane8(<8 x i8>* %A, i8 %B) nounwind {
;CHECK: vset_lane8:
;CHECK: vmov.8
	%tmp1 = load <8 x i8>* %A
	%tmp2 = insertelement <8 x i8> %tmp1, i8 %B, i32 1
	ret <8 x i8> %tmp2
}

; Insert i16 into lane 1 of a <4 x i16>; should select vmov.16.
define <4 x i16> @vset_lane16(<4 x i16>* %A, i16 %B) nounwind {
;CHECK: vset_lane16:
;CHECK: vmov.16
	%tmp1 = load <4 x i16>* %A
	%tmp2 = insertelement <4 x i16> %tmp1, i16 %B, i32 1
	ret <4 x i16> %tmp2
}

; Insert i32 into lane 1 of a <2 x i32>; should select vmov.32.
define <2 x i32> @vset_lane32(<2 x i32>* %A, i32 %B) nounwind {
;CHECK: vset_lane32:
;CHECK: vmov.32
	%tmp1 = load <2 x i32>* %A
	%tmp2 = insertelement <2 x i32> %tmp1, i32 %B, i32 1
	ret <2 x i32> %tmp2
}

; Insert i8 into lane 1 of a Q-reg <16 x i8>; should select vmov.8.
define <16 x i8> @vsetQ_lane8(<16 x i8>* %A, i8 %B) nounwind {
;CHECK: vsetQ_lane8:
;CHECK: vmov.8
	%tmp1 = load <16 x i8>* %A
	%tmp2 = insertelement <16 x i8> %tmp1, i8 %B, i32 1
	ret <16 x i8> %tmp2
}

; Insert i16 into lane 1 of a Q-reg <8 x i16>; should select vmov.16.
define <8 x i16> @vsetQ_lane16(<8 x i16>* %A, i16 %B) nounwind {
;CHECK: vsetQ_lane16:
;CHECK: vmov.16
	%tmp1 = load <8 x i16>* %A
	%tmp2 = insertelement <8 x i16> %tmp1, i16 %B, i32 1
	ret <8 x i16> %tmp2
}

; Insert i32 into lane 1 of a Q-reg <4 x i32>; should select vmov.32.
define <4 x i32> @vsetQ_lane32(<4 x i32>* %A, i32 %B) nounwind {
;CHECK: vsetQ_lane32:
;CHECK: vmov.32
	%tmp1 = load <4 x i32>* %A
	%tmp2 = insertelement <4 x i32> %tmp1, i32 %B, i32 1
	ret <4 x i32> %tmp2
}

; Insert float into lane 1 of a <2 x float>; expect VFP register moves.
define arm_aapcs_vfpcc <2 x float> @test_vset_lanef32(float %arg0_float32_t, <2 x float> %arg1_float32x2_t) nounwind {
;CHECK: test_vset_lanef32:
;CHECK: vmov.f32 s3, s0
;CHECK: vmov.f64 d0, d1
entry:
	%0 = insertelement <2 x float> %arg1_float32x2_t, float %arg0_float32_t, i32 1 ; <<2 x float>> [#uses=1]
	ret <2 x float> %0
}