[LSR] canonicalize Prod*(1<<C) to Prod<<C
Summary:
Because LSR runs at a late stage, after a mul by a power of 2 has
typically already been canonicalized to shl elsewhere in the IR,
emitting shl here as well produces code that CSE/GVN can deduplicate.
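For example (an illustrative sketch; %stride and %s are made-up names),
where LSR previously expanded a strength-reduced stride of s*4 as

  %stride = mul i64 %s, 4

it now emits the equivalent

  %stride = shl i64 %s, 2

which matches the shl form that earlier passes already produce for the
same expression.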
Test Plan:
The new test Transforms/LoopStrengthReduce/shl.ll shows how this change
lets GVN eliminate more redundant instructions. Existing tests whose
expected output changes are updated accordingly.
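Concretely (a sketch of what shl.ll exercises), when the source already
contains

  %mul = shl nsw i64 %s, 2

and LSR expands the loop stride as shl i64 %s, 2 rather than
mul i64 %s, 4, GVN can fold the two shifts together, so the loop ends
up reusing %mul as its stride.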
Reviewers: sanjoy, majnemer, atrick
Reviewed By: majnemer, atrick
Subscribers: majnemer, llvm-commits
Differential Revision: http://reviews.llvm.org/D10448
llvm-svn: 240573
diff --git a/llvm/test/Transforms/LoopStrengthReduce/2011-10-06-ReusePhi.ll b/llvm/test/Transforms/LoopStrengthReduce/2011-10-06-ReusePhi.ll
index cc8226e..5923a42 100644
--- a/llvm/test/Transforms/LoopStrengthReduce/2011-10-06-ReusePhi.ll
+++ b/llvm/test/Transforms/LoopStrengthReduce/2011-10-06-ReusePhi.ll
@@ -12,8 +12,8 @@
; CHECK-LABEL: @test(
; multiplies are hoisted out of the loop
; CHECK: while.body.lr.ph:
-; CHECK: mul i64
-; CHECK: mul i64
+; CHECK: shl i64
+; CHECK: shl i64
; GEPs are ugly
; CHECK: while.body:
; CHECK: phi
diff --git a/llvm/test/Transforms/LoopStrengthReduce/post-inc-icmpzero.ll b/llvm/test/Transforms/LoopStrengthReduce/post-inc-icmpzero.ll
index 092b274..466566e 100644
--- a/llvm/test/Transforms/LoopStrengthReduce/post-inc-icmpzero.ll
+++ b/llvm/test/Transforms/LoopStrengthReduce/post-inc-icmpzero.ll
@@ -6,7 +6,7 @@
; CHECK: [[r1:%[a-z0-9]+]] = sub i64 %sub.ptr.lhs.cast, %sub.ptr.rhs.cast
; CHECK: [[r2:%[a-z0-9]+]] = lshr i64 [[r1]], 1
-; CHECK: [[r3:%[a-z0-9]+]] = mul i64 [[r2]], 2
+; CHECK: [[r3:%[a-z0-9]+]] = shl i64 [[r2]], 1
; CHECK: br label %for.body
; CHECK: for.body:
; CHECK: %lsr.iv2 = phi i64 [ %lsr.iv.next, %for.body ], [ [[r3]], %for.body.lr.ph ]
diff --git a/llvm/test/Transforms/LoopStrengthReduce/shl.ll b/llvm/test/Transforms/LoopStrengthReduce/shl.ll
new file mode 100644
index 0000000..bb9cb39
--- /dev/null
+++ b/llvm/test/Transforms/LoopStrengthReduce/shl.ll
@@ -0,0 +1,38 @@
+; RUN: opt < %s -loop-reduce -gvn -S | FileCheck %s
+
+target datalayout = "e-i64:64-v16:16-v32:32-n16:32:64"
+
+define void @_Z3fooPfll(float* nocapture readonly %input, i64 %n, i64 %s) {
+; CHECK-LABEL: @_Z3fooPfll(
+entry:
+ %mul = shl nsw i64 %s, 2
+; CHECK: %mul = shl i64 %s, 2
+ tail call void @_Z3bazl(i64 %mul) #2
+; CHECK-NEXT: call void @_Z3bazl(i64 %mul)
+ %cmp.5 = icmp sgt i64 %n, 0
+ br i1 %cmp.5, label %for.body.preheader, label %for.cond.cleanup
+
+for.body.preheader: ; preds = %entry
+ br label %for.body
+
+for.cond.cleanup.loopexit: ; preds = %for.body
+ br label %for.cond.cleanup
+
+for.cond.cleanup: ; preds = %for.cond.cleanup.loopexit, %entry
+ ret void
+
+for.body: ; preds = %for.body.preheader, %for.body
+ %i.06 = phi i64 [ %add, %for.body ], [ 0, %for.body.preheader ]
+ %arrayidx = getelementptr inbounds float, float* %input, i64 %i.06
+; LoopStrengthReduce should reuse %mul as the stride.
+; CHECK: getelementptr i1, i1* {{[^,]+}}, i64 %mul
+ %0 = load float, float* %arrayidx, align 4
+ tail call void @_Z3barf(float %0) #2
+ %add = add nsw i64 %i.06, %s
+ %cmp = icmp slt i64 %add, %n
+ br i1 %cmp, label %for.body, label %for.cond.cleanup.loopexit
+}
+
+declare void @_Z3bazl(i64)
+
+declare void @_Z3barf(float)