New Loop Distribution pass

Summary:
This implements the initial version of the pass, as proposed earlier this year
(http://lists.cs.uiuc.edu/pipermail/llvmdev/2015-January/080462.html).
Since then, Loop Access Analysis has been split out of the Loop Vectorizer
into a separate analysis pass.  Loop Distribution becomes the second user of
this analysis.

The pass is off by default and can be enabled
with -enable-loop-distribution.  There is currently no notion of
profitability: if a loop contains dependence cycles, the pass tries to split
them off from the other memory operations into a separate loop.
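
As a rough illustration, here is the transformation sketched in C on the loop
from the basic.ll test added below (the function names and the use of
restrict, which stands in for the noalias/memcheck requirements, are mine):

  void before(int *restrict a, int *restrict b, int *restrict c,
              int *restrict d, int *restrict e, int n) {
    for (int i = 0; i < n; i++) {
      a[i + 1] = a[i] * b[i];  /* dependence cycle: a[i + 1] feeds a[i] */
      c[i] = d[i] * e[i];      /* cycle-free */
    }
  }

  /* After distribution the cycle is isolated in its own loop and the
     remaining loop becomes a vectorization candidate: */
  void after(int *restrict a, int *restrict b, int *restrict c,
             int *restrict d, int *restrict e, int n) {
    for (int i = 0; i < n; i++)
      a[i + 1] = a[i] * b[i];
    for (int i = 0; i < n; i++)
      c[i] = d[i] * e[i];
  }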

I decided to remove the control-dependence calculation from this first
version.  This, and the issues with the PDT, are being actively discussed, so
it probably makes sense to treat it separately.  Right now I just mark all
terminator instructions as required, which keeps the CFGs identical across
the distributed loops.  This seems to work well for 456.hmmer: even though
the distributed loop initially contains an empty if-then block, it gets
completely removed.
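
To make the CFG point concrete, here is a rough C sketch mirroring the
no-if-convert.ll test added below (names are illustrative and the IR the pass
actually produces is only approximated):

  /* One cycle-free statement, one dependence cycle on a, and one
     conditional store that is kept together with the cycle partition. */
  void before(int *restrict a, int *restrict b, int *restrict c,
              int *restrict d, int *restrict e, int *restrict f,
              int *restrict g, int *restrict h, int *restrict j, int n) {
    for (int i = 0; i < n; i++) {
      c[i] = d[i] * e[i];
      a[i + 1] = a[i] * b[i];
      if (f[i])
        g[i] = h[i] * j[i];
    }
  }

  /* Because every terminator is kept, both distributed loops retain the
     branch on f[i]; in the first loop the then-block is empty (as in
     456.hmmer) and is expected to be cleaned up by later passes. */
  void after(int *restrict a, int *restrict b, int *restrict c,
             int *restrict d, int *restrict e, int *restrict f,
             int *restrict g, int *restrict h, int *restrict j, int n) {
    for (int i = 0; i < n; i++) {
      c[i] = d[i] * e[i];
      if (f[i]) {
        /* empty if-then block; kept so the CFG matches the original */
      }
    }
    for (int i = 0; i < n; i++) {
      a[i + 1] = a[i] * b[i];
      if (f[i])
        g[i] = h[i] * j[i];
    }
  }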

The pass keeps DominatorTree and LoopInfo updated.  I've tested this
with -loop-distribute-verify on the test-suite, where we distribute ~90
loops.  Loop-simplify form is violated in some cases, and I have a FIXME
covering this.

Reviewers: hfinkel, nadav, aschwaighofer

Reviewed By: aschwaighofer

Subscribers: llvm-commits

Differential Revision: http://reviews.llvm.org/D8831

llvm-svn: 237358
diff --git a/llvm/test/Transforms/LoopDistribute/basic-with-memchecks.ll b/llvm/test/Transforms/LoopDistribute/basic-with-memchecks.ll
new file mode 100644
index 0000000..4c1c1b8
--- /dev/null
+++ b/llvm/test/Transforms/LoopDistribute/basic-with-memchecks.ll
@@ -0,0 +1,112 @@
+; RUN: opt -basicaa -loop-distribute -verify-loop-info -verify-dom-info -S \
+; RUN:   < %s | FileCheck %s
+
+; RUN: opt -basicaa -loop-distribute -loop-vectorize -force-vector-width=4 \
+; RUN:   -verify-loop-info -verify-dom-info -S < %s | \
+; RUN:   FileCheck --check-prefix=VECTORIZE %s
+
+; The memcheck version of basic.ll.  We should distribute and vectorize the
+; second part of this loop with 5 memchecks (A+1 x {C, D, E} + C x {A, B})
+;
+;   for (i = 0; i < n; i++) {
+;     A[i + 1] = A[i] * B[i];
+; -------------------------------
+;     C[i] = D[i] * E[i];
+;   }
+
+target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-apple-macosx10.10.0"
+
+@B = common global i32* null, align 8
+@A = common global i32* null, align 8
+@C = common global i32* null, align 8
+@D = common global i32* null, align 8
+@E = common global i32* null, align 8
+
+define void @f() {
+entry:
+  %a = load i32*, i32** @A, align 8
+  %b = load i32*, i32** @B, align 8
+  %c = load i32*, i32** @C, align 8
+  %d = load i32*, i32** @D, align 8
+  %e = load i32*, i32** @E, align 8
+  br label %for.body
+
+; We have two compares for each array overlap check, which is a total of 10
+; compares.
+;
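+; (A rough breakdown, assuming the usual interval-overlap form of the checks:
+; for each of the 5 pointer pairs the start of one access range is compared
+; against the end of the other in both directions, i.e. 2 icmps per pair,
+; and the results are reduced into %memcheck.conflict.)
+;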
+; CHECK: for.body.ldist.memcheck:
+; CHECK:     = icmp
+; CHECK:     = icmp
+
+; CHECK:     = icmp
+; CHECK:     = icmp
+
+; CHECK:     = icmp
+; CHECK:     = icmp
+
+; CHECK:     = icmp
+; CHECK:     = icmp
+
+; CHECK:     = icmp
+; CHECK:     = icmp
+
+; CHECK-NOT: = icmp
+; CHECK:     br i1 %memcheck.conflict, label %for.body.ph.ldist.nondist, label %for.body.ph.ldist1
+
+; The non-distributed loop that the memchecks fall back on.
+
+; CHECK: for.body.ph.ldist.nondist:
+; CHECK:     br label %for.body.ldist.nondist
+; CHECK: for.body.ldist.nondist:
+; CHECK:    br i1 %exitcond.ldist.nondist, label %for.end, label %for.body.ldist.nondist
+
+; Verify the two distributed loops.
+
+; CHECK: for.body.ph.ldist1:
+; CHECK:     br label %for.body.ldist1
+; CHECK: for.body.ldist1:
+; CHECK:    %mulA.ldist1 = mul i32 %loadB.ldist1, %loadA.ldist1
+; CHECK:    br i1 %exitcond.ldist1, label %for.body.ph, label %for.body.ldist1
+
+; CHECK: for.body.ph:
+; CHECK:    br label %for.body
+; CHECK: for.body:
+; CHECK:    %mulC = mul i32 %loadD, %loadE
+; CHECK: for.end:
+
+
+; VECTORIZE: mul <4 x i32>
+
+for.body:                                         ; preds = %for.body, %entry
+  %ind = phi i64 [ 0, %entry ], [ %add, %for.body ]
+
+  %arrayidxA = getelementptr inbounds i32, i32* %a, i64 %ind
+  %loadA = load i32, i32* %arrayidxA, align 4
+
+  %arrayidxB = getelementptr inbounds i32, i32* %b, i64 %ind
+  %loadB = load i32, i32* %arrayidxB, align 4
+
+  %mulA = mul i32 %loadB, %loadA
+
+  %add = add nuw nsw i64 %ind, 1
+  %arrayidxA_plus_4 = getelementptr inbounds i32, i32* %a, i64 %add
+  store i32 %mulA, i32* %arrayidxA_plus_4, align 4
+
+  %arrayidxD = getelementptr inbounds i32, i32* %d, i64 %ind
+  %loadD = load i32, i32* %arrayidxD, align 4
+
+  %arrayidxE = getelementptr inbounds i32, i32* %e, i64 %ind
+  %loadE = load i32, i32* %arrayidxE, align 4
+
+  %mulC = mul i32 %loadD, %loadE
+
+  %arrayidxC = getelementptr inbounds i32, i32* %c, i64 %ind
+  store i32 %mulC, i32* %arrayidxC, align 4
+
+  %exitcond = icmp eq i64 %add, 20
+  br i1 %exitcond, label %for.end, label %for.body
+
+for.end:                                          ; preds = %for.body
+  ret void
+}
diff --git a/llvm/test/Transforms/LoopDistribute/basic.ll b/llvm/test/Transforms/LoopDistribute/basic.ll
new file mode 100644
index 0000000..1331e09
--- /dev/null
+++ b/llvm/test/Transforms/LoopDistribute/basic.ll
@@ -0,0 +1,84 @@
+; RUN: opt -basicaa -loop-distribute -verify-loop-info -verify-dom-info -S \
+; RUN:   < %s | FileCheck %s
+
+; RUN: opt -basicaa -loop-distribute -verify-loop-info -verify-dom-info \
+; RUN:   -loop-accesses -analyze < %s | FileCheck %s --check-prefix=ANALYSIS
+
+; RUN: opt -basicaa -loop-distribute -loop-vectorize -force-vector-width=4 -S \
+; RUN:   < %s | FileCheck %s --check-prefix=VECTORIZE
+
+; We should distribute this loop into a safe loop (2nd statement) and an
+; unsafe loop (1st statement):
+;   for (i = 0; i < n; i++) {
+;     A[i + 1] = A[i] * B[i];
+;     =======================
+;     C[i] = D[i] * E[i];
+;   }
+
+target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-apple-macosx10.10.0"
+
+define void @f(i32* noalias %a,
+               i32* noalias %b,
+               i32* noalias %c,
+               i32* noalias %d,
+               i32* noalias %e) {
+entry:
+  br label %for.body
+
+; Verify the two distributed loops.
+
+; CHECK: entry.split.ldist1:
+; CHECK:    br label %for.body.ldist1
+; CHECK: for.body.ldist1:
+; CHECK:    %mulA.ldist1 = mul i32 %loadB.ldist1, %loadA.ldist1
+; CHECK:    br i1 %exitcond.ldist1, label %entry.split, label %for.body.ldist1
+
+; CHECK: entry.split:
+; CHECK:    br label %for.body
+; CHECK: for.body:
+; CHECK:    %mulC = mul i32 %loadD, %loadE
+; CHECK: for.end:
+
+
+; ANALYSIS: for.body:
+; ANALYSIS-NEXT: Memory dependences are safe{{$}}
+; ANALYSIS: for.body.ldist1:
+; ANALYSIS-NEXT: Store to invariant address was not found in loop
+; ANALYSIS-NEXT: Report: unsafe dependent memory operations in loop
+
+
+; VECTORIZE: mul <4 x i32>
+
+for.body:                                         ; preds = %for.body, %entry
+  %ind = phi i64 [ 0, %entry ], [ %add, %for.body ]
+
+  %arrayidxA = getelementptr inbounds i32, i32* %a, i64 %ind
+  %loadA = load i32, i32* %arrayidxA, align 4
+
+  %arrayidxB = getelementptr inbounds i32, i32* %b, i64 %ind
+  %loadB = load i32, i32* %arrayidxB, align 4
+
+  %mulA = mul i32 %loadB, %loadA
+
+  %add = add nuw nsw i64 %ind, 1
+  %arrayidxA_plus_4 = getelementptr inbounds i32, i32* %a, i64 %add
+  store i32 %mulA, i32* %arrayidxA_plus_4, align 4
+
+  %arrayidxD = getelementptr inbounds i32, i32* %d, i64 %ind
+  %loadD = load i32, i32* %arrayidxD, align 4
+
+  %arrayidxE = getelementptr inbounds i32, i32* %e, i64 %ind
+  %loadE = load i32, i32* %arrayidxE, align 4
+
+  %mulC = mul i32 %loadD, %loadE
+
+  %arrayidxC = getelementptr inbounds i32, i32* %c, i64 %ind
+  store i32 %mulC, i32* %arrayidxC, align 4
+
+  %exitcond = icmp eq i64 %add, 20
+  br i1 %exitcond, label %for.end, label %for.body
+
+for.end:                                          ; preds = %for.body
+  ret void
+}
diff --git a/llvm/test/Transforms/LoopDistribute/crash-in-memcheck-generation.ll b/llvm/test/Transforms/LoopDistribute/crash-in-memcheck-generation.ll
new file mode 100644
index 0000000..c81ddf5
--- /dev/null
+++ b/llvm/test/Transforms/LoopDistribute/crash-in-memcheck-generation.ll
@@ -0,0 +1,59 @@
+; RUN: opt -basicaa -loop-distribute -loop-vectorize -force-vector-width=4 \
+; RUN:   -verify-loop-info -verify-dom-info -S < %s | FileCheck %s
+
+; If only A and B can alias here, we don't need memchecks to distribute since
+; A and B are in the same partition.  This used to cause a crash in the
+; memcheck generation.
+;
+;   for (i = 0; i < n; i++) {
+;     A[i + 1] = A[i] * B[i];
+; ------------------------------
+;     C[i] = D[i] * E[i];
+;   }
+
+target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-apple-macosx10.10.0"
+
+define void @f(i32*  %a,
+               i32*  %b,
+               i32* noalias %c,
+               i32* noalias %d,
+               i32* noalias %e) {
+entry:
+  br label %for.body
+
+; CHECK-NOT: memcheck:
+; CHECK: mul <4 x i32>
+
+for.body:                                         ; preds = %for.body, %entry
+  %ind = phi i64 [ 0, %entry ], [ %add, %for.body ]
+
+  %arrayidxA = getelementptr inbounds i32, i32* %a, i64 %ind
+  %loadA = load i32, i32* %arrayidxA, align 4
+
+  %arrayidxB = getelementptr inbounds i32, i32* %b, i64 %ind
+  %loadB = load i32, i32* %arrayidxB, align 4
+
+  %mulA = mul i32 %loadB, %loadA
+
+  %add = add nuw nsw i64 %ind, 1
+  %arrayidxA_plus_4 = getelementptr inbounds i32, i32* %a, i64 %add
+  store i32 %mulA, i32* %arrayidxA_plus_4, align 4
+
+  %arrayidxD = getelementptr inbounds i32, i32* %d, i64 %ind
+  %loadD = load i32, i32* %arrayidxD, align 4
+
+  %arrayidxE = getelementptr inbounds i32, i32* %e, i64 %ind
+  %loadE = load i32, i32* %arrayidxE, align 4
+
+  %mulC = mul i32 %loadD, %loadE
+
+  %arrayidxC = getelementptr inbounds i32, i32* %c, i64 %ind
+  store i32 %mulC, i32* %arrayidxC, align 4
+
+  %exitcond = icmp eq i64 %add, 20
+  br i1 %exitcond, label %for.end, label %for.body
+
+for.end:                                          ; preds = %for.body
+  ret void
+}
diff --git a/llvm/test/Transforms/LoopDistribute/no-if-convert.ll b/llvm/test/Transforms/LoopDistribute/no-if-convert.ll
new file mode 100644
index 0000000..fcd8b65
--- /dev/null
+++ b/llvm/test/Transforms/LoopDistribute/no-if-convert.ll
@@ -0,0 +1,95 @@
+; RUN: opt -basicaa -loop-distribute -verify-loop-info -verify-dom-info -S < %s \
+; RUN:   | FileCheck %s
+
+; We should distribute this loop along === but not along ---.  The last
+; partition won't be vectorized due to the conditional store, so it's better
+; to keep it with the second partition, which has a dependence cycle.
+
+;
+;   for (i = 0; i < n; i++) {
+;     C[i] = D[i] * E[i];
+;=============================
+;     A[i + 1] = A[i] * B[i];
+;-----------------------------
+;     if (F[i])
+;        G[i] = H[i] * J[i];
+;   }
+
+target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-apple-macosx10.10.0"
+
+define void @f(i32* noalias %a,
+               i32* noalias %b,
+               i32* noalias %c,
+               i32* noalias %d,
+               i32* noalias %e,
+               i32* noalias %g,
+               i32* noalias %h,
+               i32* noalias %j,
+               i64 %x) {
+entry:
+  br label %for.body
+
+; Ensure that we have only two partitions, the first with one multiplication
+; and the second with two.
+
+; CHECK: for.body.ldist1:
+; CHECK:    %mulC.ldist1 = mul i32 %loadD.ldist1, %loadE.ldist1
+; CHECK:    br i1 %exitcond.ldist1, label %entry.split, label %for.body.ldist1
+; CHECK: entry.split:
+; CHECK:    br label %for.body
+; CHECK: for.body:
+; CHECK:    %mulA = mul i32 %loadB, %loadA
+; CHECK:    %mulG = mul i32 %loadH, %loadJ
+; CHECK: for.end:
+
+for.body:                                         ; preds = %for.body, %entry
+  %ind = phi i64 [ 0, %entry ], [ %add, %if.end ]
+
+  %arrayidxD = getelementptr inbounds i32, i32* %d, i64 %ind
+  %loadD = load i32, i32* %arrayidxD, align 4
+
+  %arrayidxE = getelementptr inbounds i32, i32* %e, i64 %ind
+  %loadE = load i32, i32* %arrayidxE, align 4
+
+  %mulC = mul i32 %loadD, %loadE
+
+  %arrayidxC = getelementptr inbounds i32, i32* %c, i64 %ind
+  store i32 %mulC, i32* %arrayidxC, align 4
+
+
+  %arrayidxA = getelementptr inbounds i32, i32* %a, i64 %ind
+  %loadA = load i32, i32* %arrayidxA, align 4
+
+  %arrayidxB = getelementptr inbounds i32, i32* %b, i64 %ind
+  %loadB = load i32, i32* %arrayidxB, align 4
+
+  %mulA = mul i32 %loadB, %loadA
+
+  %add = add nuw nsw i64 %ind, 1
+  %arrayidxA_plus_4 = getelementptr inbounds i32, i32* %a, i64 %add
+  store i32 %mulA, i32* %arrayidxA_plus_4, align 4
+
+  %if.cond = icmp eq i64 %ind, %x
+  br i1 %if.cond, label %if.then, label %if.end
+
+if.then:
+  %arrayidxH = getelementptr inbounds i32, i32* %h, i64 %ind
+  %loadH = load i32, i32* %arrayidxH, align 4
+
+  %arrayidxJ = getelementptr inbounds i32, i32* %j, i64 %ind
+  %loadJ = load i32, i32* %arrayidxJ, align 4
+
+  %mulG = mul i32 %loadH, %loadJ
+
+  %arrayidxG = getelementptr inbounds i32, i32* %g, i64 %ind
+  store i32 %mulG, i32* %arrayidxG, align 4
+  br label %if.end
+
+if.end:
+  %exitcond = icmp eq i64 %add, 20
+  br i1 %exitcond, label %for.end, label %for.body
+
+for.end:                                          ; preds = %for.body
+  ret void
+}
diff --git a/llvm/test/Transforms/LoopDistribute/outside-use.ll b/llvm/test/Transforms/LoopDistribute/outside-use.ll
new file mode 100644
index 0000000..546050d
--- /dev/null
+++ b/llvm/test/Transforms/LoopDistribute/outside-use.ll
@@ -0,0 +1,69 @@
+; RUN: opt -loop-distribute -verify-loop-info -verify-dom-info -S < %s \
+; RUN:   | FileCheck %s
+
+; Check that definitions used outside the loop are handled correctly: (1) they
+; are not dropped, and (2) when we version the loop, a phi is added to merge
+; the value from the non-distributed loop and the distributed loop.
+;
+;   for (i = 0; i < n; i++) {
+;     A[i + 1] = A[i] * B[i];
+;   ==========================
+;     sum += C[i];
+;   }
+
+target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-apple-macosx10.10.0"
+
+@B = common global i32* null, align 8
+@A = common global i32* null, align 8
+@C = common global i32* null, align 8
+@D = common global i32* null, align 8
+@E = common global i32* null, align 8
+@SUM = common global i32 0, align 8
+
+define void @f() {
+entry:
+  %a = load i32*, i32** @A, align 8
+  %b = load i32*, i32** @B, align 8
+  %c = load i32*, i32** @C, align 8
+  %d = load i32*, i32** @D, align 8
+  %e = load i32*, i32** @E, align 8
+
+  br label %for.body
+
+; CHECK: for.body.ldist1:
+; CHECK:   %mulA.ldist1 = mul i32 %loadB.ldist1, %loadA.ldist1
+; CHECK: for.body.ph:
+; CHECK: for.body:
+; CHECK:   %sum_add = add nuw nsw i32 %sum, %loadC
+; CHECK: for.end:
+; CHECK:   %sum_add.ldist = phi i32 [ %sum_add, %for.body ], [ %sum_add.ldist.nondist, %for.body.ldist.nondist ]
+
+for.body:                                         ; preds = %for.body, %entry
+  %ind = phi i64 [ 0, %entry ], [ %add, %for.body ]
+  %sum = phi i32 [ 0, %entry ], [ %sum_add, %for.body ]
+
+  %arrayidxA = getelementptr inbounds i32, i32* %a, i64 %ind
+  %loadA = load i32, i32* %arrayidxA, align 4
+
+  %arrayidxB = getelementptr inbounds i32, i32* %b, i64 %ind
+  %loadB = load i32, i32* %arrayidxB, align 4
+
+  %mulA = mul i32 %loadB, %loadA
+
+  %add = add nuw nsw i64 %ind, 1
+  %arrayidxA_plus_4 = getelementptr inbounds i32, i32* %a, i64 %add
+  store i32 %mulA, i32* %arrayidxA_plus_4, align 4
+
+  %arrayidxC = getelementptr inbounds i32, i32* %c, i64 %ind
+  %loadC = load i32, i32* %arrayidxC, align 4
+
+  %sum_add = add nuw nsw i32 %sum, %loadC
+
+  %exitcond = icmp eq i64 %add, 20
+  br i1 %exitcond, label %for.end, label %for.body
+
+for.end:                                          ; preds = %for.body
+  store i32 %sum_add, i32* @SUM, align 4
+  ret void
+}
diff --git a/llvm/test/Transforms/LoopDistribute/program-order.ll b/llvm/test/Transforms/LoopDistribute/program-order.ll
new file mode 100644
index 0000000..b534b79
--- /dev/null
+++ b/llvm/test/Transforms/LoopDistribute/program-order.ll
@@ -0,0 +1,65 @@
+; RUN: opt -loop-distribute -S -verify-loop-info -verify-dom-info < %s \
+; RUN:   | FileCheck %s
+
+; Distributing this loop to avoid the dependence cycle would require
+; reordering S1 and S2 to form the two partitions {S2} | {S1, S3}.  The
+; analysis provided by LoopAccessAnalysis does not allow us to reorder memory
+; operations, so make sure we bail on this loop.
+;
+;   for (i = 0; i < n; i++) {
+;     S1: d = D[i];
+;     S2: A[i + 1] = A[i] * B[i];
+;     S3: C[i] = d * E[i];
+;   }
+
+target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-apple-macosx10.10.0"
+
+define void @f(i32* noalias %a,
+               i32* noalias %b,
+               i32* noalias %c,
+               i32* noalias %d,
+               i32* noalias %e) {
+entry:
+  br label %for.body
+
+; CHECK: entry:
+; CHECK:    br label %for.body
+; CHECK: for.body:
+; CHECK:    br i1 %exitcond, label %for.end, label %for.body
+; CHECK: for.end:
+; CHECK:    ret void
+
+for.body:                                         ; preds = %for.body, %entry
+  %ind = phi i64 [ 0, %entry ], [ %add, %for.body ]
+
+  %arrayidxA = getelementptr inbounds i32, i32* %a, i64 %ind
+  %loadA = load i32, i32* %arrayidxA, align 4
+
+  %arrayidxB = getelementptr inbounds i32, i32* %b, i64 %ind
+  %loadB = load i32, i32* %arrayidxB, align 4
+
+  %mulA = mul i32 %loadB, %loadA
+
+  %arrayidxD = getelementptr inbounds i32, i32* %d, i64 %ind
+  %loadD = load i32, i32* %arrayidxD, align 4
+
+  %add = add nuw nsw i64 %ind, 1
+  %arrayidxA_plus_4 = getelementptr inbounds i32, i32* %a, i64 %add
+  store i32 %mulA, i32* %arrayidxA_plus_4, align 4
+
+  %arrayidxC = getelementptr inbounds i32, i32* %c, i64 %ind
+
+  %arrayidxE = getelementptr inbounds i32, i32* %e, i64 %ind
+  %loadE = load i32, i32* %arrayidxE, align 4
+
+  %mulC = mul i32 %loadD, %loadE
+
+  store i32 %mulC, i32* %arrayidxC, align 4
+
+  %exitcond = icmp eq i64 %add, 20
+  br i1 %exitcond, label %for.end, label %for.body
+
+for.end:                                          ; preds = %for.body
+  ret void
+}