blob: 8d7ded5be06ea181467fb08104a66a563caf1ef3 [file] [log] [blame]
Jakob Stoklund Olesen0bb3dd72012-09-17 23:03:25 +00001; RUN: llc < %s -mcpu=cortex-a9 -new-coalescer | FileCheck %s
Jakob Stoklund Olesen984997b2012-05-15 23:31:35 +00002target datalayout = "e-p:32:32:32-i1:8:32-i8:8:32-i16:16:32-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:32:64-v128:32:128-a0:0:32-n32-S32"
3target triple = "thumbv7-apple-ios0.0.0"
4
5; CHECK: f
6; The vld2 and vst2 are not aligned wrt each other, the second Q loaded is the
7; first one stored.
8; The coalescer must find a super-register larger than QQ to eliminate the copy
9; setting up the vst2 data.
10; CHECK: vld2
11; CHECK-NOT: vorr
12; CHECK-NOT: vmov
13; CHECK: vst2
14define void @f(float* %p, i32 %c) nounwind ssp {
15entry:
16 %0 = bitcast float* %p to i8*
17 %vld2 = tail call { <4 x float>, <4 x float> } @llvm.arm.neon.vld2.v4f32(i8* %0, i32 4)
18 %vld221 = extractvalue { <4 x float>, <4 x float> } %vld2, 1
19 %add.ptr = getelementptr inbounds float* %p, i32 8
20 %1 = bitcast float* %add.ptr to i8*
21 tail call void @llvm.arm.neon.vst2.v4f32(i8* %1, <4 x float> %vld221, <4 x float> undef, i32 4)
22 ret void
23}
24
25; CHECK: f1
26; FIXME: This function still has copies.
27define void @f1(float* %p, i32 %c) nounwind ssp {
28entry:
29 %0 = bitcast float* %p to i8*
30 %vld2 = tail call { <4 x float>, <4 x float> } @llvm.arm.neon.vld2.v4f32(i8* %0, i32 4)
31 %vld221 = extractvalue { <4 x float>, <4 x float> } %vld2, 1
32 %add.ptr = getelementptr inbounds float* %p, i32 8
33 %1 = bitcast float* %add.ptr to i8*
34 %vld22 = tail call { <4 x float>, <4 x float> } @llvm.arm.neon.vld2.v4f32(i8* %1, i32 4)
35 %vld2215 = extractvalue { <4 x float>, <4 x float> } %vld22, 0
36 tail call void @llvm.arm.neon.vst2.v4f32(i8* %1, <4 x float> %vld221, <4 x float> %vld2215, i32 4)
37 ret void
38}
39
40; CHECK: f2
41; FIXME: This function still has copies.
42define void @f2(float* %p, i32 %c) nounwind ssp {
43entry:
44 %0 = bitcast float* %p to i8*
45 %vld2 = tail call { <4 x float>, <4 x float> } @llvm.arm.neon.vld2.v4f32(i8* %0, i32 4)
46 %vld224 = extractvalue { <4 x float>, <4 x float> } %vld2, 1
47 br label %do.body
48
49do.body: ; preds = %do.body, %entry
50 %qq0.0.1.0 = phi <4 x float> [ %vld224, %entry ], [ %vld2216, %do.body ]
51 %c.addr.0 = phi i32 [ %c, %entry ], [ %dec, %do.body ]
52 %p.addr.0 = phi float* [ %p, %entry ], [ %add.ptr, %do.body ]
53 %add.ptr = getelementptr inbounds float* %p.addr.0, i32 8
54 %1 = bitcast float* %add.ptr to i8*
55 %vld22 = tail call { <4 x float>, <4 x float> } @llvm.arm.neon.vld2.v4f32(i8* %1, i32 4)
56 %vld2215 = extractvalue { <4 x float>, <4 x float> } %vld22, 0
57 %vld2216 = extractvalue { <4 x float>, <4 x float> } %vld22, 1
58 tail call void @llvm.arm.neon.vst2.v4f32(i8* %1, <4 x float> %qq0.0.1.0, <4 x float> %vld2215, i32 4)
59 %dec = add nsw i32 %c.addr.0, -1
60 %tobool = icmp eq i32 %dec, 0
61 br i1 %tobool, label %do.end, label %do.body
62
63do.end: ; preds = %do.body
64 ret void
65}
66
67declare { <4 x float>, <4 x float> } @llvm.arm.neon.vld2.v4f32(i8*, i32) nounwind readonly
68declare void @llvm.arm.neon.vst2.v4f32(i8*, <4 x float>, <4 x float>, i32) nounwind
Jakob Stoklund Olesen0bb3dd72012-09-17 23:03:25 +000069
70; CHECK: f3
71; This function has lane insertions that span basic blocks.
72; The trivial REG_SEQUENCE lowering can't handle that, but the coalescer can.
73;
74; void f3(float *p, float *q) {
75; float32x2_t x;
76; x[1] = p[3];
77; if (q)
78; x[0] = q[0] + q[1];
79; else
80; x[0] = p[2];
81; vst1_f32(p+4, x);
82; }
83;
84; CHECK-NOT: vmov
85; CHECK-NOT: vorr
86define void @f3(float* %p, float* %q) nounwind ssp {
87entry:
88 %arrayidx = getelementptr inbounds float* %p, i32 3
89 %0 = load float* %arrayidx, align 4
90 %vecins = insertelement <2 x float> undef, float %0, i32 1
91 %tobool = icmp eq float* %q, null
92 br i1 %tobool, label %if.else, label %if.then
93
94if.then: ; preds = %entry
95 %1 = load float* %q, align 4
96 %arrayidx2 = getelementptr inbounds float* %q, i32 1
97 %2 = load float* %arrayidx2, align 4
98 %add = fadd float %1, %2
99 %vecins3 = insertelement <2 x float> %vecins, float %add, i32 0
100 br label %if.end
101
102if.else: ; preds = %entry
103 %arrayidx4 = getelementptr inbounds float* %p, i32 2
104 %3 = load float* %arrayidx4, align 4
105 %vecins5 = insertelement <2 x float> %vecins, float %3, i32 0
106 br label %if.end
107
108if.end: ; preds = %if.else, %if.then
109 %x.0 = phi <2 x float> [ %vecins3, %if.then ], [ %vecins5, %if.else ]
110 %add.ptr = getelementptr inbounds float* %p, i32 4
111 %4 = bitcast float* %add.ptr to i8*
112 tail call void @llvm.arm.neon.vst1.v2f32(i8* %4, <2 x float> %x.0, i32 4)
113 ret void
114}
115
116declare void @llvm.arm.neon.vst1.v2f32(i8*, <2 x float>, i32) nounwind
Jakob Stoklund Olesen7d3c9c02012-09-19 21:29:18 +0000117declare <2 x float> @llvm.arm.neon.vld1.v2f32(i8*, i32) nounwind readonly
118
119; CHECK: f4
120; This function inserts a lane into a fully defined vector.
121; The destination lane isn't read, so the subregs can coalesce.
122; CHECK-NOT: vmov
123; CHECK-NOT: vorr
124define void @f4(float* %p, float* %q) nounwind ssp {
125entry:
126 %0 = bitcast float* %p to i8*
127 %vld1 = tail call <2 x float> @llvm.arm.neon.vld1.v2f32(i8* %0, i32 4)
128 %tobool = icmp eq float* %q, null
129 br i1 %tobool, label %if.end, label %if.then
130
131if.then: ; preds = %entry
132 %1 = load float* %q, align 4
133 %arrayidx1 = getelementptr inbounds float* %q, i32 1
134 %2 = load float* %arrayidx1, align 4
135 %add = fadd float %1, %2
136 %vecins = insertelement <2 x float> %vld1, float %add, i32 1
137 br label %if.end
138
139if.end: ; preds = %entry, %if.then
140 %x.0 = phi <2 x float> [ %vecins, %if.then ], [ %vld1, %entry ]
141 tail call void @llvm.arm.neon.vst1.v2f32(i8* %0, <2 x float> %x.0, i32 4)
142 ret void
143}
Jakob Stoklund Olesenb8707fa2012-09-20 23:08:42 +0000144
145; CHECK: f5
146; Coalesce vector lanes through phis.
147; CHECK: vmov.f32 {{.*}}, #1.0
148; CHECK-NOT: vmov
149; CHECK-NOT: vorr
150; CHECK: %if.end
151; We may leave the last insertelement in the if.end block.
152; It is inserting the %add value into a dead lane, but %add causes interference
153; in the entry block, and we don't do dead lane checks across basic blocks.
154define void @f5(float* %p, float* %q) nounwind ssp {
155entry:
156 %0 = bitcast float* %p to i8*
157 %vld1 = tail call <4 x float> @llvm.arm.neon.vld1.v4f32(i8* %0, i32 4)
158 %vecext = extractelement <4 x float> %vld1, i32 0
159 %vecext1 = extractelement <4 x float> %vld1, i32 1
160 %vecext2 = extractelement <4 x float> %vld1, i32 2
161 %vecext3 = extractelement <4 x float> %vld1, i32 3
162 %add = fadd float %vecext3, 1.000000e+00
163 %tobool = icmp eq float* %q, null
164 br i1 %tobool, label %if.end, label %if.then
165
166if.then: ; preds = %entry
167 %arrayidx = getelementptr inbounds float* %q, i32 1
168 %1 = load float* %arrayidx, align 4
169 %add4 = fadd float %vecext, %1
170 %2 = load float* %q, align 4
171 %add6 = fadd float %vecext1, %2
172 %arrayidx7 = getelementptr inbounds float* %q, i32 2
173 %3 = load float* %arrayidx7, align 4
174 %add8 = fadd float %vecext2, %3
175 br label %if.end
176
177if.end: ; preds = %entry, %if.then
178 %a.0 = phi float [ %add4, %if.then ], [ %vecext, %entry ]
179 %b.0 = phi float [ %add6, %if.then ], [ %vecext1, %entry ]
180 %c.0 = phi float [ %add8, %if.then ], [ %vecext2, %entry ]
181 %vecinit = insertelement <4 x float> undef, float %a.0, i32 0
182 %vecinit9 = insertelement <4 x float> %vecinit, float %b.0, i32 1
183 %vecinit10 = insertelement <4 x float> %vecinit9, float %c.0, i32 2
184 %vecinit11 = insertelement <4 x float> %vecinit10, float %add, i32 3
185 tail call void @llvm.arm.neon.vst1.v4f32(i8* %0, <4 x float> %vecinit11, i32 4)
186 ret void
187}
188
189declare <4 x float> @llvm.arm.neon.vld1.v4f32(i8*, i32) nounwind readonly
190
191declare void @llvm.arm.neon.vst1.v4f32(i8*, <4 x float>, i32) nounwind