[LoopVectorize] Induction variables: support arbitrary constant step.
Previously, only -1 and +1 step values were supported for induction variables. This patch extends the
loop vectorizer (LV) to support arbitrary constant steps.
Initial patch by Alexey Volkov; subsequent revisions added some bug fixes.
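
As an illustrative sketch only (not part of this patch or its tests; the function name, step value,
and array bound are arbitrary), a C loop of the kind the extended vectorizer should now handle:

  int sum_step3(const int *A) {
    int sum = 0;
    /* The induction variable advances by a constant step of 3; previously
       only steps of -1 and +1 could be vectorized. A is assumed to hold at
       least 1024 elements. */
    for (int i = 0; i < 1024; i += 3)
      sum += A[i];
    return sum;
  }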

Differential Revision: http://reviews.llvm.org/D6051 and http://reviews.llvm.org/D7193

llvm-svn: 227557
diff --git a/llvm/test/Transforms/LoopVectorize/arbitrary-induction-step.ll b/llvm/test/Transforms/LoopVectorize/arbitrary-induction-step.ll
new file mode 100644
index 0000000..95734bf
--- /dev/null
+++ b/llvm/test/Transforms/LoopVectorize/arbitrary-induction-step.ll
@@ -0,0 +1,150 @@
+; RUN: opt -S < %s -loop-vectorize 2>&1 | FileCheck %s
+; RUN: opt -S < %s -loop-vectorize -force-vector-interleave=1 -force-vector-width=2 | FileCheck %s --check-prefix=FORCE-VEC
+
+target datalayout = "e-m:e-i64:64-i128:128-n32:64-S128"
+target triple = "aarch64--linux-gnueabi"
+
+; Test an integer induction variable with step 2:
+;   for (int i = 0; i < 1024; i+=2) {
+;     int tmp = *A++;
+;     sum += i * tmp;
+;   }
+
+; CHECK-LABEL: @ind_plus2(
+; CHECK: load <4 x i32>*
+; CHECK: load <4 x i32>*
+; CHECK: mul nsw <4 x i32>
+; CHECK: mul nsw <4 x i32>
+; CHECK: add nsw <4 x i32>
+; CHECK: add nsw <4 x i32>
+; CHECK: %index.next = add i64 %index, 8
+; CHECK: icmp eq i64 %index.next, 512
+
+; FORCE-VEC-LABEL: @ind_plus2(
+; FORCE-VEC: %wide.load = load <2 x i32>*
+; FORCE-VEC: mul nsw <2 x i32>
+; FORCE-VEC: add nsw <2 x i32>
+; FORCE-VEC: %index.next = add i64 %index, 2
+; FORCE-VEC: icmp eq i64 %index.next, 512
+define i32 @ind_plus2(i32* %A) {
+entry:
+  br label %for.body
+
+for.body:                                         ; preds = %entry, %for.body
+  %A.addr = phi i32* [ %A, %entry ], [ %inc.ptr, %for.body ]
+  %i = phi i32 [ 0, %entry ], [ %add1, %for.body ]
+  %sum = phi i32 [ 0, %entry ], [ %add, %for.body ]
+  %inc.ptr = getelementptr inbounds i32* %A.addr, i64 1
+  %0 = load i32* %A.addr, align 4
+  %mul = mul nsw i32 %0, %i
+  %add = add nsw i32 %mul, %sum
+  %add1 = add nsw i32 %i, 2
+  %cmp = icmp slt i32 %add1, 1024
+  br i1 %cmp, label %for.body, label %for.end
+
+for.end:                                          ; preds = %for.body
+  %add.lcssa = phi i32 [ %add, %for.body ]
+  ret i32 %add.lcssa
+}
+
+
+; Test an integer induction variable with step -2:
+;   for (int i = 1024; i > 0; i-=2) {
+;     int tmp = *A++;
+;     sum += i * tmp;
+;   }
+
+; CHECK-LABEL: @ind_minus2(
+; CHECK: load <4 x i32>*
+; CHECK: load <4 x i32>*
+; CHECK: mul nsw <4 x i32>
+; CHECK: mul nsw <4 x i32>
+; CHECK: add nsw <4 x i32>
+; CHECK: add nsw <4 x i32>
+; CHECK: %index.next = add i64 %index, 8
+; CHECK: icmp eq i64 %index.next, 512
+
+; FORCE-VEC-LABEL: @ind_minus2(
+; FORCE-VEC: %wide.load = load <2 x i32>*
+; FORCE-VEC: mul nsw <2 x i32>
+; FORCE-VEC: add nsw <2 x i32>
+; FORCE-VEC: %index.next = add i64 %index, 2
+; FORCE-VEC: icmp eq i64 %index.next, 512
+define i32 @ind_minus2(i32* %A) {
+entry:
+  br label %for.body
+
+for.body:                                         ; preds = %entry, %for.body
+  %A.addr = phi i32* [ %A, %entry ], [ %inc.ptr, %for.body ]
+  %i = phi i32 [ 1024, %entry ], [ %sub, %for.body ]
+  %sum = phi i32 [ 0, %entry ], [ %add, %for.body ]
+  %inc.ptr = getelementptr inbounds i32* %A.addr, i64 1
+  %0 = load i32* %A.addr, align 4
+  %mul = mul nsw i32 %0, %i
+  %add = add nsw i32 %mul, %sum
+  %sub = add nsw i32 %i, -2
+  %cmp = icmp sgt i32 %i, 2
+  br i1 %cmp, label %for.body, label %for.end
+
+for.end:                                          ; preds = %for.body
+  %add.lcssa = phi i32 [ %add, %for.body ]
+  ret i32 %add.lcssa
+}
+
+
+; Test a pointer induction variable with step 2. Since we don't currently
+; support masked loads/stores, vectorization is possible but not beneficial.
+; If vectorization is not forced, LV will only interleave.
+;   for (int i = 0; i < 1024; i++) {
+;     int tmp0 = *A++;
+;     int tmp1 = *A++;
+;     sum += tmp0 * tmp1;
+;   }
+
+; CHECK-LABEL: @ptr_ind_plus2(
+; CHECK: load i32*
+; CHECK: load i32*
+; CHECK: load i32*
+; CHECK: load i32*
+; CHECK: mul nsw i32
+; CHECK: mul nsw i32
+; CHECK: add nsw i32
+; CHECK: add nsw i32
+; CHECK: %index.next = add i64 %index, 2
+; CHECK: icmp eq i64 %index.next, 1024
+
+; FORCE-VEC-LABEL: @ptr_ind_plus2(
+; FORCE-VEC: load i32*
+; FORCE-VEC: insertelement <2 x i32>
+; FORCE-VEC: load i32*
+; FORCE-VEC: insertelement <2 x i32>
+; FORCE-VEC: load i32*
+; FORCE-VEC: insertelement <2 x i32>
+; FORCE-VEC: load i32*
+; FORCE-VEC: insertelement <2 x i32>
+; FORCE-VEC: mul nsw <2 x i32>
+; FORCE-VEC: add nsw <2 x i32>
+; FORCE-VEC: %index.next = add i64 %index, 2
+; FORCE-VEC: icmp eq i64 %index.next, 1024
+define i32 @ptr_ind_plus2(i32* %A) {
+entry:
+  br label %for.body
+
+for.body:                                         ; preds = %for.body, %entry
+  %A.addr = phi i32* [ %A, %entry ], [ %inc.ptr1, %for.body ]
+  %sum = phi i32 [ 0, %entry ], [ %add, %for.body ]
+  %i = phi i32 [ 0, %entry ], [ %inc, %for.body ]
+  %inc.ptr = getelementptr inbounds i32* %A.addr, i64 1
+  %0 = load i32* %A.addr, align 4
+  %inc.ptr1 = getelementptr inbounds i32* %A.addr, i64 2
+  %1 = load i32* %inc.ptr, align 4
+  %mul = mul nsw i32 %1, %0
+  %add = add nsw i32 %mul, %sum
+  %inc = add nsw i32 %i, 1
+  %exitcond = icmp eq i32 %inc, 1024
+  br i1 %exitcond, label %for.end, label %for.body
+
+for.end:                                          ; preds = %for.body
+  %add.lcssa = phi i32 [ %add, %for.body ]
+  ret i32 %add.lcssa
+}
diff --git a/llvm/test/Transforms/LoopVectorize/gcc-examples.ll b/llvm/test/Transforms/LoopVectorize/gcc-examples.ll
index 6c8af0b..6a2c2c6 100644
--- a/llvm/test/Transforms/LoopVectorize/gcc-examples.ll
+++ b/llvm/test/Transforms/LoopVectorize/gcc-examples.ll
@@ -388,9 +388,8 @@
   ret void
 }
 
-; Can't vectorize because of reductions.
 ;CHECK-LABEL: @example13(
-;CHECK-NOT: <4 x i32>
+;CHECK: <4 x i32>
 ;CHECK: ret void
 define void @example13(i32** nocapture %A, i32** nocapture %B, i32* nocapture %out) nounwind uwtable ssp {
   br label %.preheader
diff --git a/llvm/test/Transforms/LoopVectorize/reverse_induction.ll b/llvm/test/Transforms/LoopVectorize/reverse_induction.ll
index da02d01..d379606 100644
--- a/llvm/test/Transforms/LoopVectorize/reverse_induction.ll
+++ b/llvm/test/Transforms/LoopVectorize/reverse_induction.ll
@@ -97,7 +97,7 @@
 ; CHECK: vector.body
 ; CHECK: %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
 ; CHECK: %normalized.idx = sub i64 %index, 0
-; CHECK: %reverse.idx = sub i64 1023, %normalized.idx
+; CHECK: %offset.idx = sub i64 1023, %normalized.idx
 ; CHECK: trunc i64 %index to i8
 
 define void @reverse_forward_induction_i64_i8() {
@@ -124,7 +124,7 @@
 ; CHECK: vector.body:
 ; CHECK:  %index = phi i64 [ 129, %vector.ph ], [ %index.next, %vector.body ]
 ; CHECK:  %normalized.idx = sub i64 %index, 129
-; CHECK:  %reverse.idx = sub i64 1023, %normalized.idx
+; CHECK:  %offset.idx = sub i64 1023, %normalized.idx
 ; CHECK:  trunc i64 %index to i8
 
 define void @reverse_forward_induction_i64_i8_signed() {