; RUN: opt < %s -loop-vectorize -force-vector-interleave=1 -force-vector-width=4 -dce -instcombine -S | FileCheck %s

target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
target triple = "x86_64-apple-macosx10.8.0"

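; The scalar multiply carries 'nsw'; check that the widened multiply keeps it.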
;CHECK-LABEL: @flags1(
;CHECK: load <4 x i32>
;CHECK: mul nsw <4 x i32>
;CHECK: store <4 x i32>
;CHECK: ret i32
define i32 @flags1(i32 %n, i32* nocapture %A) nounwind uwtable ssp {
  %1 = icmp sgt i32 %n, 9
  br i1 %1, label %.lr.ph, label %._crit_edge

.lr.ph:                                           ; preds = %0, %.lr.ph
  %indvars.iv = phi i64 [ %indvars.iv.next, %.lr.ph ], [ 9, %0 ]
  %2 = getelementptr inbounds i32, i32* %A, i64 %indvars.iv
  %3 = load i32, i32* %2, align 4
  %4 = mul nsw i32 %3, 3
  store i32 %4, i32* %2, align 4
  %indvars.iv.next = add i64 %indvars.iv, 1
  %lftr.wideiv = trunc i64 %indvars.iv.next to i32
  %exitcond = icmp eq i32 %lftr.wideiv, %n
  br i1 %exitcond, label %._crit_edge, label %.lr.ph

._crit_edge:                                      ; preds = %.lr.ph, %0
  ret i32 undef
}

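; Same loop with a plain multiply; the vectorized multiply is expected to stay flagless.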
;CHECK-LABEL: @flags2(
;CHECK: load <4 x i32>
;CHECK: mul <4 x i32>
;CHECK: store <4 x i32>
;CHECK: ret i32
define i32 @flags2(i32 %n, i32* nocapture %A) nounwind uwtable ssp {
  %1 = icmp sgt i32 %n, 9
  br i1 %1, label %.lr.ph, label %._crit_edge

.lr.ph:                                           ; preds = %0, %.lr.ph
  %indvars.iv = phi i64 [ %indvars.iv.next, %.lr.ph ], [ 9, %0 ]
  %2 = getelementptr inbounds i32, i32* %A, i64 %indvars.iv
  %3 = load i32, i32* %2, align 4
  %4 = mul i32 %3, 3
  store i32 %4, i32* %2, align 4
  %indvars.iv.next = add i64 %indvars.iv, 1
  %lftr.wideiv = trunc i64 %indvars.iv.next to i32
  %exitcond = icmp eq i32 %lftr.wideiv, %n
  br i1 %exitcond, label %._crit_edge, label %.lr.ph

._crit_edge:                                      ; preds = %.lr.ph, %0
  ret i32 undef
}

; Make sure we copy fast math flags and use them for the final reduction.
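; The first fadd fast check matches the vector loop body; the two after the
; branch match the final reduction of the <4 x float> accumulator to a scalar,
; which must also carry the fast flag.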
; CHECK-LABEL: fast_math
; CHECK: load <4 x float>
; CHECK: fadd fast <4 x float>
; CHECK: br
; CHECK: fadd fast <4 x float>
; CHECK: fadd fast <4 x float>
define float @fast_math(float* noalias %s) {
entry:
  br label %for.body

for.body:
  %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
  %q.04 = phi float [ 0.000000e+00, %entry ], [ %add, %for.body ]
  %arrayidx = getelementptr inbounds float, float* %s, i64 %indvars.iv
  %0 = load float, float* %arrayidx, align 4
  %add = fadd fast float %q.04, %0
  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
  %exitcond = icmp eq i64 %indvars.iv.next, 256
  br i1 %exitcond, label %for.end, label %for.body

for.end:
  %add.lcssa = phi float [ %add, %for.body ]
  ret float %add.lcssa
}