; RUN: opt < %s -loop-vectorize -force-vector-unroll=1 -force-vector-width=2 -S | FileCheck %s

target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"

; Make sure that we can handle multiple integer induction variables.
; CHECK-LABEL: @multi_int_induction(
; CHECK: vector.body:
; CHECK: %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
; CHECK: %normalized.idx = sub i64 %index, 0
; CHECK: %[[VAR:.*]] = trunc i64 %normalized.idx to i32
; CHECK: %offset.idx = add i32 190, %[[VAR]]
| 12 | define void @multi_int_induction(i32* %A, i32 %N) { |
| 13 | for.body.lr.ph: |
| 14 | br label %for.body |
| 15 | |
| 16 | for.body: |
| 17 | %indvars.iv = phi i64 [ 0, %for.body.lr.ph ], [ %indvars.iv.next, %for.body ] |
| 18 | %count.09 = phi i32 [ 190, %for.body.lr.ph ], [ %inc, %for.body ] |
| 19 | %arrayidx2 = getelementptr inbounds i32* %A, i64 %indvars.iv |
| 20 | store i32 %count.09, i32* %arrayidx2, align 4 |
| 21 | %inc = add nsw i32 %count.09, 1 |
| 22 | %indvars.iv.next = add i64 %indvars.iv, 1 |
| 23 | %lftr.wideiv = trunc i64 %indvars.iv.next to i32 |
| 24 | %exitcond = icmp ne i32 %lftr.wideiv, %N |
| 25 | br i1 %exitcond, label %for.body, label %for.end |
| 26 | |
| 27 | for.end: |
| 28 | ret void |
| 29 | } |
| 30 | |
; RUN: opt < %s -loop-vectorize -force-vector-unroll=1 -force-vector-width=2 -instcombine -S | FileCheck %s --check-prefix=IND

; Make sure we remove unneeded vectorization of induction variables.
; In order for instcombine to cleanup the vectorized induction variables that we
; create in the loop vectorizer we need to perform some form of redundancy
; elimination to get rid of multiple uses.

; IND-LABEL: scalar_use

; IND: br label %vector.body
; IND: vector.body:
; Vectorized induction variable.
; IND-NOT: insertelement <2 x i64>
; IND-NOT: shufflevector <2 x i64>
; IND: br {{.*}}, label %vector.body
| 47 | define void @scalar_use(float* %a, float %b, i64 %offset, i64 %offset2, i64 %n) { |
| 48 | entry: |
| 49 | br label %for.body |
| 50 | |
| 51 | for.body: |
| 52 | %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ] |
| 53 | %ind.sum = add i64 %iv, %offset |
| 54 | %arr.idx = getelementptr inbounds float* %a, i64 %ind.sum |
| 55 | %l1 = load float* %arr.idx, align 4 |
| 56 | %ind.sum2 = add i64 %iv, %offset2 |
| 57 | %arr.idx2 = getelementptr inbounds float* %a, i64 %ind.sum2 |
| 58 | %l2 = load float* %arr.idx2, align 4 |
| 59 | %m = fmul fast float %b, %l2 |
| 60 | %ad = fadd fast float %l1, %m |
| 61 | store float %ad, float* %arr.idx, align 4 |
| 62 | %iv.next = add nuw nsw i64 %iv, 1 |
| 63 | %exitcond = icmp eq i64 %iv.next, %n |
| 64 | br i1 %exitcond, label %loopexit, label %for.body |
| 65 | |
| 66 | loopexit: |
| 67 | ret void |
| 68 | } |