; RUN: llc < %s -march=x86-64 -O3 | FileCheck %s
; Module-level target description: 64-bit pointers, x86-64 generic triple.
target datalayout = "e-p:64:64:64"
target triple = "x86_64-unknown-unknown"

; Full strength reduction reduces register pressure from 5 to 4 here.
; Instruction selection should use the FLAGS value from the dec for
; the branch. Scheduling should push the adds upwards.

; CHECK: full_me_0:
; CHECK: movsd (%rsi), %xmm0
; CHECK: addq $8, %rsi
; CHECK: mulsd (%rdx), %xmm0
; CHECK: addq $8, %rdx
; CHECK: movsd %xmm0, (%rdi)
; CHECK: addq $8, %rdi
; CHECK: decq %rcx
; CHECK: jne

define void @full_me_0(double* nocapture %A, double* nocapture %B, double* nocapture %C, i64 %n) nounwind {
; A[i] = B[i] * C[i] for 0 <= i < n; the entry guard skips the loop when n <= 0.
entry:
  %t0 = icmp sgt i64 %n, 0
  br i1 %t0, label %loop, label %return

loop:
  %i = phi i64 [ %i.next, %loop ], [ 0, %entry ]
  %Ai = getelementptr inbounds double* %A, i64 %i
  %Bi = getelementptr inbounds double* %B, i64 %i
  %Ci = getelementptr inbounds double* %C, i64 %i
  %t1 = load double* %Bi
  %t2 = load double* %Ci
  %m = fmul double %t1, %t2
  store double %m, double* %Ai
  %i.next = add nsw i64 %i, 1
  %exitcond = icmp eq i64 %i.next, %n
  br i1 %exitcond, label %return, label %loop

return:
  ret void
}

; Mostly-full strength reduction means we do full strength reduction on all
; except for the offsets.
;
; Given a choice between constant offsets -2048 and 2048, choose the negative
; value, because at boundary conditions it has a smaller encoding.
; TODO: That's an over-general heuristic. It would be better for the target
; to indicate what the encoding cost would be. Then using a 2048 offset
; would be better on x86-64, since the start value would be 0 instead of
; 2048.

; CHECK: mostly_full_me_0:
; CHECK: movsd -2048(%rsi), %xmm0
; CHECK: mulsd -2048(%rdx), %xmm0
; CHECK: movsd %xmm0, -2048(%rdi)
; CHECK: movsd (%rsi), %xmm0
; CHECK: addq $8, %rsi
; CHECK: divsd (%rdx), %xmm0
; CHECK: addq $8, %rdx
; CHECK: movsd %xmm0, (%rdi)
; CHECK: addq $8, %rdi
; CHECK: decq %rcx
; CHECK: jne

define void @mostly_full_me_0(double* nocapture %A, double* nocapture %B, double* nocapture %C, i64 %n) nounwind {
; Two accesses per iteration: A[i] = B[i] * C[i] and A[i+256] = B[i+256] / C[i+256].
; The constant +256 element offset (2048 bytes) exercises LSR's offset folding.
entry:
  %t0 = icmp sgt i64 %n, 0
  br i1 %t0, label %loop, label %return

loop:
  %i = phi i64 [ %i.next, %loop ], [ 0, %entry ]
  %Ai = getelementptr inbounds double* %A, i64 %i
  %Bi = getelementptr inbounds double* %B, i64 %i
  %Ci = getelementptr inbounds double* %C, i64 %i
  %t1 = load double* %Bi
  %t2 = load double* %Ci
  %m = fmul double %t1, %t2
  store double %m, double* %Ai
  %j = add i64 %i, 256
  %Aj = getelementptr inbounds double* %A, i64 %j
  %Bj = getelementptr inbounds double* %B, i64 %j
  %Cj = getelementptr inbounds double* %C, i64 %j
  %t3 = load double* %Bj
  %t4 = load double* %Cj
  %o = fdiv double %t3, %t4
  store double %o, double* %Aj
  %i.next = add nsw i64 %i, 1
  %exitcond = icmp eq i64 %i.next, %n
  br i1 %exitcond, label %return, label %loop

return:
  ret void
}

; A minor variation on mostly_full_me_0.
; Prefer to start the indvar at 0.

; CHECK: mostly_full_me_1:
; CHECK: movsd (%rsi), %xmm0
; CHECK: mulsd (%rdx), %xmm0
; CHECK: movsd %xmm0, (%rdi)
; CHECK: movsd -2048(%rsi), %xmm0
; CHECK: addq $8, %rsi
; CHECK: divsd -2048(%rdx), %xmm0
; CHECK: addq $8, %rdx
; CHECK: movsd %xmm0, -2048(%rdi)
; CHECK: addq $8, %rdi
; CHECK: decq %rcx
; CHECK: jne

define void @mostly_full_me_1(double* nocapture %A, double* nocapture %B, double* nocapture %C, i64 %n) nounwind {
; Same as mostly_full_me_0 except the second access uses i-256 (a -2048-byte
; offset), so LSR should keep the induction variable starting at 0.
entry:
  %t0 = icmp sgt i64 %n, 0
  br i1 %t0, label %loop, label %return

loop:
  %i = phi i64 [ %i.next, %loop ], [ 0, %entry ]
  %Ai = getelementptr inbounds double* %A, i64 %i
  %Bi = getelementptr inbounds double* %B, i64 %i
  %Ci = getelementptr inbounds double* %C, i64 %i
  %t1 = load double* %Bi
  %t2 = load double* %Ci
  %m = fmul double %t1, %t2
  store double %m, double* %Ai
  %j = sub i64 %i, 256
  %Aj = getelementptr inbounds double* %A, i64 %j
  %Bj = getelementptr inbounds double* %B, i64 %j
  %Cj = getelementptr inbounds double* %C, i64 %j
  %t3 = load double* %Bj
  %t4 = load double* %Cj
  %o = fdiv double %t3, %t4
  store double %o, double* %Aj
  %i.next = add nsw i64 %i, 1
  %exitcond = icmp eq i64 %i.next, %n
  br i1 %exitcond, label %return, label %loop

return:
  ret void
}

; A slightly less minor variation on mostly_full_me_0.

; CHECK: mostly_full_me_2:
; CHECK: movsd (%rsi), %xmm0
; CHECK: mulsd (%rdx), %xmm0
; CHECK: movsd %xmm0, (%rdi)
; CHECK: movsd -4096(%rsi), %xmm0
; CHECK: addq $8, %rsi
; CHECK: divsd -4096(%rdx), %xmm0
; CHECK: addq $8, %rdx
; CHECK: movsd %xmm0, -4096(%rdi)
; CHECK: addq $8, %rdi
; CHECK: decq %rcx
; CHECK: jne

define void @mostly_full_me_2(double* nocapture %A, double* nocapture %B, double* nocapture %C, i64 %n) nounwind {
; Accesses at i+256 and i-256; the 512-element span (4096 bytes) between the
; two streams is what the CHECK lines' -4096 displacements come from.
entry:
  %t0 = icmp sgt i64 %n, 0
  br i1 %t0, label %loop, label %return

loop:
  %i = phi i64 [ %i.next, %loop ], [ 0, %entry ]
  %k = add i64 %i, 256
  %Ak = getelementptr inbounds double* %A, i64 %k
  %Bk = getelementptr inbounds double* %B, i64 %k
  %Ck = getelementptr inbounds double* %C, i64 %k
  %t1 = load double* %Bk
  %t2 = load double* %Ck
  %m = fmul double %t1, %t2
  store double %m, double* %Ak
  %j = sub i64 %i, 256
  %Aj = getelementptr inbounds double* %A, i64 %j
  %Bj = getelementptr inbounds double* %B, i64 %j
  %Cj = getelementptr inbounds double* %C, i64 %j
  %t3 = load double* %Bj
  %t4 = load double* %Cj
  %o = fdiv double %t3, %t4
  store double %o, double* %Aj
  %i.next = add nsw i64 %i, 1
  %exitcond = icmp eq i64 %i.next, %n
  br i1 %exitcond, label %return, label %loop

return:
  ret void
}

; In this test, the counting IV exit value is used, so full strength reduction
; would not reduce register pressure. IndVarSimplify ought to simplify such
; cases away, but it's useful here to verify that LSR's register pressure
; heuristics are working as expected.

; CHECK: count_me_0:
; CHECK: movsd (%rsi,%rax,8), %xmm0
; CHECK: mulsd (%rdx,%rax,8), %xmm0
; CHECK: movsd %xmm0, (%rdi,%rax,8)
; CHECK: incq %rax
; CHECK: cmpq %rax, %rcx
; CHECK: jne

define i64 @count_me_0(double* nocapture %A, double* nocapture %B, double* nocapture %C, i64 %n) nounwind {
; Like full_me_0, but returns the final counter value (%i.next, or 0 if the
; loop never ran), keeping the counting IV live out of the loop.
entry:
  %t0 = icmp sgt i64 %n, 0
  br i1 %t0, label %loop, label %return

loop:
  %i = phi i64 [ %i.next, %loop ], [ 0, %entry ]
  %Ai = getelementptr inbounds double* %A, i64 %i
  %Bi = getelementptr inbounds double* %B, i64 %i
  %Ci = getelementptr inbounds double* %C, i64 %i
  %t1 = load double* %Bi
  %t2 = load double* %Ci
  %m = fmul double %t1, %t2
  store double %m, double* %Ai
  %i.next = add nsw i64 %i, 1
  %exitcond = icmp eq i64 %i.next, %n
  br i1 %exitcond, label %return, label %loop

return:
  %q = phi i64 [ 0, %entry ], [ %i.next, %loop ]
  ret i64 %q
}

; In this test, the trip count value is used, so full strength reduction
; would not reduce register pressure.
; (though it would reduce register pressure inside the loop...)

; CHECK: count_me_1:
; CHECK: movsd (%rsi,%rax,8), %xmm0
; CHECK: mulsd (%rdx,%rax,8), %xmm0
; CHECK: movsd %xmm0, (%rdi,%rax,8)
; CHECK: incq %rax
; CHECK: cmpq %rax, %rcx
; CHECK: jne

define i64 @count_me_1(double* nocapture %A, double* nocapture %B, double* nocapture %C, i64 %n) nounwind {
; Like count_me_0, but the return value is the trip count %n itself
; (or 0 if the loop never ran), so %n stays live across the loop.
entry:
  %t0 = icmp sgt i64 %n, 0
  br i1 %t0, label %loop, label %return

loop:
  %i = phi i64 [ %i.next, %loop ], [ 0, %entry ]
  %Ai = getelementptr inbounds double* %A, i64 %i
  %Bi = getelementptr inbounds double* %B, i64 %i
  %Ci = getelementptr inbounds double* %C, i64 %i
  %t1 = load double* %Bi
  %t2 = load double* %Ci
  %m = fmul double %t1, %t2
  store double %m, double* %Ai
  %i.next = add nsw i64 %i, 1
  %exitcond = icmp eq i64 %i.next, %n
  br i1 %exitcond, label %return, label %loop

return:
  %q = phi i64 [ 0, %entry ], [ %n, %loop ]
  ret i64 %q
}

; Full strength reduction doesn't save any registers here because the
; loop tripcount is a constant.

; CHECK: count_me_2:
; CHECK: movl $10, %eax
; CHECK: align
; CHECK: BB7_1:
; CHECK: movsd -40(%rdi,%rax,8), %xmm0
; CHECK: addsd -40(%rsi,%rax,8), %xmm0
; CHECK: movsd %xmm0, -40(%rdx,%rax,8)
; CHECK: movsd (%rdi,%rax,8), %xmm0
; CHECK: subsd (%rsi,%rax,8), %xmm0
; CHECK: movsd %xmm0, (%rdx,%rax,8)
; CHECK: incq %rax
; CHECK: cmpq $5010, %rax
; CHECK: jne

define void @count_me_2(double* nocapture %A, double* nocapture %B, double* nocapture %C) nounwind {
; Constant trip count (5000) with accesses at i+5 and i+10:
; C[i+5] = A[i+5] + B[i+5] and C[i+10] = A[i+10] - B[i+10].
entry:
  br label %loop

loop:
  %i = phi i64 [ 0, %entry ], [ %i.next, %loop ]
  %i5 = add i64 %i, 5
  %Ai = getelementptr double* %A, i64 %i5
  %t2 = load double* %Ai
  %Bi = getelementptr double* %B, i64 %i5
  %t4 = load double* %Bi
  %t5 = fadd double %t2, %t4
  %Ci = getelementptr double* %C, i64 %i5
  store double %t5, double* %Ci
  %i10 = add i64 %i, 10
  %Ai10 = getelementptr double* %A, i64 %i10
  %t9 = load double* %Ai10
  %Bi10 = getelementptr double* %B, i64 %i10
  %t11 = load double* %Bi10
  %t12 = fsub double %t9, %t11
  %Ci10 = getelementptr double* %C, i64 %i10
  store double %t12, double* %Ci10
  %i.next = add i64 %i, 1
  %exitcond = icmp eq i64 %i.next, 5000
  br i1 %exitcond, label %return, label %loop

return:
  ret void
}

; This should be fully strength-reduced to reduce register pressure.

; CHECK: full_me_1:
; CHECK: align
; CHECK: BB8_1:
; CHECK: movsd (%rdi), %xmm0
; CHECK: addsd (%rsi), %xmm0
; CHECK: movsd %xmm0, (%rdx)
; CHECK: movsd 40(%rdi), %xmm0
; CHECK: addq $8, %rdi
; CHECK: subsd 40(%rsi), %xmm0
; CHECK: addq $8, %rsi
; CHECK: movsd %xmm0, 40(%rdx)
; CHECK: addq $8, %rdx
; CHECK: decq %rcx
; CHECK: jne

define void @full_me_1(double* nocapture %A, double* nocapture %B, double* nocapture %C, i64 %n) nounwind {
; Same loop body as count_me_2 but with a variable trip count %n,
; so full strength reduction pays off (no entry guard: the loop always runs once).
entry:
  br label %loop

loop:
  %i = phi i64 [ 0, %entry ], [ %i.next, %loop ]
  %i5 = add i64 %i, 5
  %Ai = getelementptr double* %A, i64 %i5
  %t2 = load double* %Ai
  %Bi = getelementptr double* %B, i64 %i5
  %t4 = load double* %Bi
  %t5 = fadd double %t2, %t4
  %Ci = getelementptr double* %C, i64 %i5
  store double %t5, double* %Ci
  %i10 = add i64 %i, 10
  %Ai10 = getelementptr double* %A, i64 %i10
  %t9 = load double* %Ai10
  %Bi10 = getelementptr double* %B, i64 %i10
  %t11 = load double* %Bi10
  %t12 = fsub double %t9, %t11
  %Ci10 = getelementptr double* %C, i64 %i10
  store double %t12, double* %Ci10
  %i.next = add i64 %i, 1
  %exitcond = icmp eq i64 %i.next, %n
  br i1 %exitcond, label %return, label %loop

return:
  ret void
}

; This is a variation on full_me_0 in which the 0,+,1 induction variable
; has a non-address use, pinning that value in a register.

; CHECK: count_me_3:
; CHECK: call
; CHECK: movsd (%r15,%r13,8), %xmm0
; CHECK: mulsd (%r14,%r13,8), %xmm0
; CHECK: movsd %xmm0, (%r12,%r13,8)
; CHECK: incq %r13
; CHECK: cmpq %r13, %rbx
; CHECK: jne

; External sink for the induction variable; gives %i a non-address use.
declare void @use(i64)

define void @count_me_3(double* nocapture %A, double* nocapture %B, double* nocapture %C, i64 %n) nounwind {
; full_me_0 plus a call to @use(%i) each iteration, so the raw counter value
; must stay available in a register across the call.
entry:
  %t0 = icmp sgt i64 %n, 0
  br i1 %t0, label %loop, label %return

loop:
  %i = phi i64 [ %i.next, %loop ], [ 0, %entry ]
  call void @use(i64 %i)
  %Ai = getelementptr inbounds double* %A, i64 %i
  %Bi = getelementptr inbounds double* %B, i64 %i
  %Ci = getelementptr inbounds double* %C, i64 %i
  %t1 = load double* %Bi
  %t2 = load double* %Ci
  %m = fmul double %t1, %t2
  store double %m, double* %Ai
  %i.next = add nsw i64 %i, 1
  %exitcond = icmp eq i64 %i.next, %n
  br i1 %exitcond, label %return, label %loop

return:
  ret void
}