; RUN: opt < %s -loop-vectorize -force-vector-interleave=1 -force-vector-width=4 -dce -instcombine -S | FileCheck %s

target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"

; Vectorizing this loop must preserve the 'nsw' wrap flag from the scalar
; multiply on the widened <4 x i32> multiply.
;CHECK-LABEL: @flags1(
;CHECK: load <4 x i32>
;CHECK: mul nsw <4 x i32>
;CHECK: store <4 x i32>
;CHECK: ret i32
define i32 @flags1(i32 %n, i32* nocapture %A) nounwind uwtable ssp {
  %1 = icmp sgt i32 %n, 9
  br i1 %1, label %.lr.ph, label %._crit_edge

.lr.ph:                                           ; preds = %0, %.lr.ph
  %indvars.iv = phi i64 [ %indvars.iv.next, %.lr.ph ], [ 9, %0 ]
  %2 = getelementptr inbounds i32, i32* %A, i64 %indvars.iv
  %3 = load i32, i32* %2, align 4
  %4 = mul nsw i32 %3, 3                          ; scalar op carries nsw
  store i32 %4, i32* %2, align 4
  %indvars.iv.next = add i64 %indvars.iv, 1
  %lftr.wideiv = trunc i64 %indvars.iv.next to i32
  %exitcond = icmp eq i32 %lftr.wideiv, %n
  br i1 %exitcond, label %._crit_edge, label %.lr.ph

._crit_edge:                                      ; preds = %.lr.ph, %0
  ret i32 undef
}


; Same loop as @flags1 but the scalar multiply carries no wrap flags; the
; vectorized multiply is checked as plain 'mul' (no nsw/nuw invented).
;CHECK-LABEL: @flags2(
;CHECK: load <4 x i32>
;CHECK: mul <4 x i32>
;CHECK: store <4 x i32>
;CHECK: ret i32
define i32 @flags2(i32 %n, i32* nocapture %A) nounwind uwtable ssp {
  %1 = icmp sgt i32 %n, 9
  br i1 %1, label %.lr.ph, label %._crit_edge

.lr.ph:                                           ; preds = %0, %.lr.ph
  %indvars.iv = phi i64 [ %indvars.iv.next, %.lr.ph ], [ 9, %0 ]
  %2 = getelementptr inbounds i32, i32* %A, i64 %indvars.iv
  %3 = load i32, i32* %2, align 4
  %4 = mul i32 %3, 3                              ; no wrap flags here
  store i32 %4, i32* %2, align 4
  %indvars.iv.next = add i64 %indvars.iv, 1
  %lftr.wideiv = trunc i64 %indvars.iv.next to i32
  %exitcond = icmp eq i32 %lftr.wideiv, %n
  br i1 %exitcond, label %._crit_edge, label %.lr.ph

._crit_edge:                                      ; preds = %.lr.ph, %0
  ret i32 undef
}

; Make sure we copy fast math flags and use them for the final reduction.
; The 'fast' flag on the scalar fadd must appear both on the widened loop
; fadd and on the horizontal-reduction fadds after the loop exit branch.
; CHECK-LABEL: fast_math
; CHECK: load <4 x float>
; CHECK: fadd fast <4 x float>
; CHECK: br
; CHECK: fadd fast <4 x float>
; CHECK: fadd fast <4 x float>
define float @fast_math(float* noalias %s) {
entry:
  br label %for.body

for.body:
  %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
  %q.04 = phi float [ 0.000000e+00, %entry ], [ %add, %for.body ]
  %arrayidx = getelementptr inbounds float, float* %s, i64 %indvars.iv
  %0 = load float, float* %arrayidx, align 4
  %add = fadd fast float %q.04, %0                ; reduction op carries 'fast'
  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
  %exitcond = icmp eq i64 %indvars.iv.next, 256
  br i1 %exitcond, label %for.end, label %for.body

for.end:
  %add.lcssa = phi float [ %add, %for.body ]
  ret float %add.lcssa
}