[ARM][ParallelDSP] Enable multiple uses of loads
    
When deciding whether a pair of loads can be combined into a single
wide load, we check that each load has only a sext user and that the
sext itself has only one user. This can prevent the transformation in
cases where parallel MACs use the same loaded data multiple times.
    
To enable these cases, we need to fix up any other uses after creating
the wide load: a trunc recreates the bottom narrow value and a shift
plus trunc recreates the top one. We also need to keep a record of
which loads have already been widened.

Differential Revision: https://reviews.llvm.org/D59215

llvm-svn: 356132
diff --git a/llvm/test/CodeGen/ARM/ParallelDSP/multi-use-loads.ll b/llvm/test/CodeGen/ARM/ParallelDSP/multi-use-loads.ll
new file mode 100644
index 0000000..3c19068
--- /dev/null
+++ b/llvm/test/CodeGen/ARM/ParallelDSP/multi-use-loads.ll
@@ -0,0 +1,251 @@
+; RUN: llc -O3 -mtriple=arm-arm-eabi -mcpu=cortex-m33 < %s | FileCheck %s
+
+; CHECK-LABEL: add_user
+; CHECK: %for.body
+; CHECK: ldr [[A:r[0-9]+]],{{.*}}, #2]!
+; CHECK: ldr [[B:r[0-9]+]],{{.*}}, #2]!
+; CHECK: smlad [[ACC:r[0-9]+]], [[B]], [[A]], [[ACC]]
+; CHECK: sxtah [[COUNT:r[0-9]+]], [[COUNT]], [[A]]
+define i32 @add_user(i32 %arg, i32* nocapture readnone %arg1, i16* nocapture readonly %arg2, i16* nocapture readonly %arg3) {
+entry:
+  %cmp24 = icmp sgt i32 %arg, 0
+  br i1 %cmp24, label %for.body.preheader, label %for.cond.cleanup
+
+for.body.preheader:
+  %.pre = load i16, i16* %arg3, align 2
+  %.pre27 = load i16, i16* %arg2, align 2
+  br label %for.body
+
+for.cond.cleanup:
+  %mac1.0.lcssa = phi i32 [ 0, %entry ], [ %add11, %for.body ]
+  %count.final = phi i32 [ 0, %entry ], [ %count.next, %for.body ]
+  %res = add i32 %mac1.0.lcssa, %count.final
+  ret i32 %res
+
+for.body:
+  %mac1.026 = phi i32 [ %add11, %for.body ], [ 0, %for.body.preheader ]
+  %i.025 = phi i32 [ %add, %for.body ], [ 0, %for.body.preheader ]
+  %count = phi i32 [ %count.next, %for.body ], [ 0, %for.body.preheader ]
+  %arrayidx = getelementptr inbounds i16, i16* %arg3, i32 %i.025
+  %0 = load i16, i16* %arrayidx, align 2
+  %add = add nuw nsw i32 %i.025, 1
+  %arrayidx1 = getelementptr inbounds i16, i16* %arg3, i32 %add
+  %1 = load i16, i16* %arrayidx1, align 2
+  %arrayidx3 = getelementptr inbounds i16, i16* %arg2, i32 %i.025
+  %2 = load i16, i16* %arrayidx3, align 2
+  %conv = sext i16 %2 to i32
+  %conv4 = sext i16 %0 to i32
+  %count.next = add i32 %conv4, %count
+  %mul = mul nsw i32 %conv, %conv4
+  %arrayidx6 = getelementptr inbounds i16, i16* %arg2, i32 %add
+  %3 = load i16, i16* %arrayidx6, align 2
+  %conv7 = sext i16 %3 to i32
+  %conv8 = sext i16 %1 to i32
+  %mul9 = mul nsw i32 %conv7, %conv8
+  %add10 = add i32 %mul, %mac1.026
+  %add11 = add i32 %mul9, %add10
+  %exitcond = icmp ne i32 %add, %arg
+  br i1 %exitcond, label %for.body, label %for.cond.cleanup
+}
+
+; CHECK-LABEL: mul_bottom_user
+; CHECK: %for.body
+; CHECK: ldr [[A:r[0-9]+]],{{.*}}, #2]!
+; CHECK: ldr [[B:r[0-9]+]],{{.*}}, #2]!
+; CHECK: smlad [[ACC:r[0-9]+]], [[B]], [[A]], [[ACC]]
+; CHECK: sxth [[SXT:r[0-9]+]], [[A]]
+; CHECK: mul [[COUNT:r[0-9]+]], [[SXT]], [[COUNT]]
+define i32 @mul_bottom_user(i32 %arg, i32* nocapture readnone %arg1, i16* nocapture readonly %arg2, i16* nocapture readonly %arg3) {
+entry:
+  %cmp24 = icmp sgt i32 %arg, 0
+  br i1 %cmp24, label %for.body.preheader, label %for.cond.cleanup
+
+for.body.preheader:
+  %.pre = load i16, i16* %arg3, align 2
+  %.pre27 = load i16, i16* %arg2, align 2
+  br label %for.body
+
+for.cond.cleanup:
+  %mac1.0.lcssa = phi i32 [ 0, %entry ], [ %add11, %for.body ]
+  %count.final = phi i32 [ 0, %entry ], [ %count.next, %for.body ]
+  %res = add i32 %mac1.0.lcssa, %count.final
+  ret i32 %res
+
+for.body:
+  %mac1.026 = phi i32 [ %add11, %for.body ], [ 0, %for.body.preheader ]
+  %i.025 = phi i32 [ %add, %for.body ], [ 0, %for.body.preheader ]
+  %count = phi i32 [ %count.next, %for.body ], [ 0, %for.body.preheader ]
+  %arrayidx = getelementptr inbounds i16, i16* %arg3, i32 %i.025
+  %0 = load i16, i16* %arrayidx, align 2
+  %add = add nuw nsw i32 %i.025, 1
+  %arrayidx1 = getelementptr inbounds i16, i16* %arg3, i32 %add
+  %1 = load i16, i16* %arrayidx1, align 2
+  %arrayidx3 = getelementptr inbounds i16, i16* %arg2, i32 %i.025
+  %2 = load i16, i16* %arrayidx3, align 2
+  %conv = sext i16 %2 to i32
+  %conv4 = sext i16 %0 to i32
+  %mul = mul nsw i32 %conv, %conv4
+  %arrayidx6 = getelementptr inbounds i16, i16* %arg2, i32 %add
+  %3 = load i16, i16* %arrayidx6, align 2
+  %conv7 = sext i16 %3 to i32
+  %conv8 = sext i16 %1 to i32
+  %mul9 = mul nsw i32 %conv7, %conv8
+  %add10 = add i32 %mul, %mac1.026
+  %add11 = add i32 %mul9, %add10
+  %count.next = mul i32 %conv4, %count
+  %exitcond = icmp ne i32 %add, %arg
+  br i1 %exitcond, label %for.body, label %for.cond.cleanup
+}
+
+; CHECK-LABEL: mul_top_user
+; CHECK: %for.body
+; CHECK: ldr [[A:[rl0-9]+]],{{.*}}, #2]!
+; CHECK: ldr [[B:[rl0-9]+]],{{.*}}, #2]!
+; CHECK: smlad [[ACC:[rl0-9]+]], [[B]], [[A]], [[ACC]]
+; CHECK: asr.w [[ASR:[rl0-9]+]], [[ASR]], #16
+; CHECK: mul [[COUNT:[rl0-9]+]], [[ASR]], [[COUNT]]
+define i32 @mul_top_user(i32 %arg, i32* nocapture readnone %arg1, i16* nocapture readonly %arg2, i16* nocapture readonly %arg3) {
+entry:
+  %cmp24 = icmp sgt i32 %arg, 0
+  br i1 %cmp24, label %for.body.preheader, label %for.cond.cleanup
+
+for.body.preheader:
+  %.pre = load i16, i16* %arg3, align 2
+  %.pre27 = load i16, i16* %arg2, align 2
+  br label %for.body
+
+for.cond.cleanup:
+  %mac1.0.lcssa = phi i32 [ 0, %entry ], [ %add11, %for.body ]
+  %count.final = phi i32 [ 0, %entry ], [ %count.next, %for.body ]
+  %res = add i32 %mac1.0.lcssa, %count.final
+  ret i32 %res
+
+for.body:
+  %mac1.026 = phi i32 [ %add11, %for.body ], [ 0, %for.body.preheader ]
+  %i.025 = phi i32 [ %add, %for.body ], [ 0, %for.body.preheader ]
+  %count = phi i32 [ %count.next, %for.body ], [ 0, %for.body.preheader ]
+  %arrayidx = getelementptr inbounds i16, i16* %arg3, i32 %i.025
+  %0 = load i16, i16* %arrayidx, align 2
+  %add = add nuw nsw i32 %i.025, 1
+  %arrayidx1 = getelementptr inbounds i16, i16* %arg3, i32 %add
+  %1 = load i16, i16* %arrayidx1, align 2
+  %arrayidx3 = getelementptr inbounds i16, i16* %arg2, i32 %i.025
+  %2 = load i16, i16* %arrayidx3, align 2
+  %conv = sext i16 %2 to i32
+  %conv4 = sext i16 %0 to i32
+  %mul = mul nsw i32 %conv, %conv4
+  %arrayidx6 = getelementptr inbounds i16, i16* %arg2, i32 %add
+  %3 = load i16, i16* %arrayidx6, align 2
+  %conv7 = sext i16 %3 to i32
+  %conv8 = sext i16 %1 to i32
+  %mul9 = mul nsw i32 %conv7, %conv8
+  %add10 = add i32 %mul, %mac1.026
+  %add11 = add i32 %mul9, %add10
+  %count.next = mul i32 %conv7, %count
+  %exitcond = icmp ne i32 %add, %arg
+  br i1 %exitcond, label %for.body, label %for.cond.cleanup
+}
+
+; CHECK-LABEL: and_user
+; CHECK: %for.body
+; CHECK: ldr [[A:r[0-9]+]],{{.*}}, #2]!
+; CHECK: ldr [[B:r[0-9]+]],{{.*}}, #2]!
+; CHECK: smlad [[ACC:r[0-9]+]], [[B]], [[A]], [[ACC]]
+; CHECK: uxth [[UXT:r[0-9]+]], [[A]]
+; CHECK: mul [[MUL:r[0-9]+]], [[UXT]], [[MUL]]
+define i32 @and_user(i32 %arg, i32* nocapture readnone %arg1, i16* nocapture readonly %arg2, i16* nocapture readonly %arg3) {
+entry:
+  %cmp24 = icmp sgt i32 %arg, 0
+  br i1 %cmp24, label %for.body.preheader, label %for.cond.cleanup
+
+for.body.preheader:
+  %.pre = load i16, i16* %arg3, align 2
+  %.pre27 = load i16, i16* %arg2, align 2
+  br label %for.body
+
+for.cond.cleanup:
+  %mac1.0.lcssa = phi i32 [ 0, %entry ], [ %add11, %for.body ]
+  %count.final = phi i32 [ 0, %entry ], [ %count.next, %for.body ]
+  %res = add i32 %mac1.0.lcssa, %count.final
+  ret i32 %res
+
+for.body:
+  %mac1.026 = phi i32 [ %add11, %for.body ], [ 0, %for.body.preheader ]
+  %i.025 = phi i32 [ %add, %for.body ], [ 0, %for.body.preheader ]
+  %count = phi i32 [ %count.next, %for.body ], [ 0, %for.body.preheader ]
+  %arrayidx = getelementptr inbounds i16, i16* %arg3, i32 %i.025
+  %0 = load i16, i16* %arrayidx, align 2
+  %add = add nuw nsw i32 %i.025, 1
+  %arrayidx1 = getelementptr inbounds i16, i16* %arg3, i32 %add
+  %arrayidx3 = getelementptr inbounds i16, i16* %arg2, i32 %i.025
+  %arrayidx6 = getelementptr inbounds i16, i16* %arg2, i32 %add
+  %1 = load i16, i16* %arrayidx1, align 2
+  %2 = load i16, i16* %arrayidx3, align 2
+  %conv = sext i16 %2 to i32
+  %conv4 = sext i16 %0 to i32
+  %bottom = and i32 %conv4, 65535
+  %mul = mul nsw i32 %conv, %conv4
+  %3 = load i16, i16* %arrayidx6, align 2
+  %conv7 = sext i16 %3 to i32
+  %conv8 = sext i16 %1 to i32
+  %mul9 = mul nsw i32 %conv7, %conv8
+  %add10 = add i32 %mul, %mac1.026
+  %add11 = add i32 %mul9, %add10
+  %count.next = mul i32 %bottom, %count
+  %exitcond = icmp ne i32 %add, %arg
+  br i1 %exitcond, label %for.body, label %for.cond.cleanup
+}
+
+; CHECK-LABEL: multi_uses
+; CHECK: %for.body
+; CHECK: ldr [[A:r[0-9]+]], [{{.*}}, #2]!
+; CHECK: ldr [[B:r[0-9]+]], [{{.*}}, #2]!
+; CHECK: smlad [[ACC:[rl0-9]+]], [[B]], [[A]], [[ACC]]
+; CHECK: sxth [[SXT:r[0-9]+]], [[A]]
+; CHECK: eor.w [[EOR:r[0-9]+]], [[SXT]], [[SHIFT:r[0-9]+]]
+; CHECK: mul [[MUL:r[0-9]+]], [[EOR]], [[SXT]]
+; CHECK: lsl.w [[SHIFT]], [[MUL]], #16
+define i32 @multi_uses(i32 %arg, i32* nocapture readnone %arg1, i16* nocapture readonly %arg2, i16* nocapture readonly %arg3) {
+entry:
+  %cmp24 = icmp sgt i32 %arg, 0
+  br i1 %cmp24, label %for.body.preheader, label %for.cond.cleanup
+
+for.body.preheader:
+  %.pre = load i16, i16* %arg3, align 2
+  %.pre27 = load i16, i16* %arg2, align 2
+  br label %for.body
+
+for.cond.cleanup:
+  %mac1.0.lcssa = phi i32 [ 0, %entry ], [ %add11, %for.body ]
+  %count.final = phi i32 [ 0, %entry ], [ %count.next, %for.body ]
+  %res = add i32 %mac1.0.lcssa, %count.final
+  ret i32 %res
+
+for.body:
+  %mac1.026 = phi i32 [ %add11, %for.body ], [ 0, %for.body.preheader ]
+  %i.025 = phi i32 [ %add, %for.body ], [ 0, %for.body.preheader ]
+  %count = phi i32 [ %count.next, %for.body ], [ 0, %for.body.preheader ]
+  %arrayidx = getelementptr inbounds i16, i16* %arg3, i32 %i.025
+  %0 = load i16, i16* %arrayidx, align 2
+  %add = add nuw nsw i32 %i.025, 1
+  %arrayidx1 = getelementptr inbounds i16, i16* %arg3, i32 %add
+  %arrayidx3 = getelementptr inbounds i16, i16* %arg2, i32 %i.025
+  %arrayidx6 = getelementptr inbounds i16, i16* %arg2, i32 %add
+  %1 = load i16, i16* %arrayidx1, align 2
+  %2 = load i16, i16* %arrayidx3, align 2
+  %conv = sext i16 %2 to i32
+  %conv4 = sext i16 %0 to i32
+  %bottom = and i32 %conv4, 65535
+  %mul = mul nsw i32 %conv, %conv4
+  %3 = load i16, i16* %arrayidx6, align 2
+  %conv7 = sext i16 %3 to i32
+  %conv8 = sext i16 %1 to i32
+  %mul9 = mul nsw i32 %conv7, %conv8
+  %add10 = add i32 %mul, %mac1.026
+  %shl = shl i32 %conv4, 16
+  %add11 = add i32 %mul9, %add10
+  %xor = xor i32 %bottom, %count
+  %count.next = mul i32 %xor, %shl
+  %exitcond = icmp ne i32 %add, %arg
+  br i1 %exitcond, label %for.body, label %for.cond.cleanup
+}
diff --git a/llvm/test/CodeGen/ARM/ParallelDSP/smlad0.ll b/llvm/test/CodeGen/ARM/ParallelDSP/smlad0.ll
index 477f565..d3bf51f 100644
--- a/llvm/test/CodeGen/ARM/ParallelDSP/smlad0.ll
+++ b/llvm/test/CodeGen/ARM/ParallelDSP/smlad0.ll
@@ -210,3 +210,4 @@
   %exitcond = icmp ne i32 %add, %arg
   br i1 %exitcond, label %for.body, label %for.cond.cleanup
 }
+
diff --git a/llvm/test/CodeGen/ARM/ParallelDSP/unroll-n-jam-smlad.ll b/llvm/test/CodeGen/ARM/ParallelDSP/unroll-n-jam-smlad.ll
new file mode 100644
index 0000000..cb51bc5
--- /dev/null
+++ b/llvm/test/CodeGen/ARM/ParallelDSP/unroll-n-jam-smlad.ll
@@ -0,0 +1,217 @@
+; RUN: llc -O3 -mtriple=thumbv7em %s -o - | FileCheck %s
+; RUN: llc -O3 -mtriple=thumbv8m.main -mattr=+dsp %s -o - | FileCheck %s
+
+; Test that the duplicate loads are removed, which allows parallel dsp to find
+; the parallel operations.
+
+define void @unroll_n_jam_smlad(i32* %res, i16* %A, i16* %B, i32 %N, i32 %idx) {
+entry:
+  %xtraiter306.i = and i32 %N, 3
+  %unroll_iter310.i = sub i32 %N, %xtraiter306.i
+  %arrayidx.us.i117.i = getelementptr inbounds i32, i32* %res, i32 %idx
+  store i32 0, i32* %arrayidx.us.i117.i, align 4
+  %mul.us.i118.i = mul i32 %idx, %N
+  %inc11.us.i.i = or i32 %idx, 1
+  %arrayidx.us.i117.1.i = getelementptr inbounds i32, i32* %res, i32 %inc11.us.i.i
+  store i32 0, i32* %arrayidx.us.i117.1.i, align 4
+  %mul.us.i118.1.i = mul i32 %inc11.us.i.i, %N
+  %inc11.us.i.1.i = or i32 %idx, 2
+  %arrayidx.us.i117.2.i = getelementptr inbounds i32, i32* %res, i32 %inc11.us.i.1.i
+  store i32 0, i32* %arrayidx.us.i117.2.i, align 4
+  %mul.us.i118.2.i = mul i32 %inc11.us.i.1.i, %N
+  %inc11.us.i.2.i = or i32 %idx, 3
+  %arrayidx.us.i117.3.i = getelementptr inbounds i32, i32* %res, i32 %inc11.us.i.2.i
+  store i32 0, i32* %arrayidx.us.i117.3.i, align 4
+  %mul.us.i118.3.i = mul i32 %inc11.us.i.2.i, %N
+  %inc11.us.i.3.i = add i32 %idx, 4
+  br label %for.body
+
+; CHECK: %for.body
+; CHECK: smlad
+; CHECK: smlad
+; CHECK: smlad
+; CHECK: smlad
+; CHECK: smlad
+; CHECK: smlad
+; CHECK: smlad
+; CHECK: smlad
+
+for.body:
+  %A3 = phi i32 [ %add9.us.i.3361.i, %for.body ], [ 0, %entry ]
+  %j.026.us.i.i = phi i32 [ %inc.us.i.3362.i, %for.body ], [ 0, %entry ]
+  %A4 = phi i32 [ %add9.us.i.1.3.i, %for.body ], [ 0, %entry ]
+  %A5 = phi i32 [ %add9.us.i.2.3.i, %for.body ], [ 0, %entry ]
+  %A6 = phi i32 [ %add9.us.i.3.3.i, %for.body ], [ 0, %entry ]
+  %niter335.i = phi i32 [ %niter335.nsub.3.i, %for.body ], [ %unroll_iter310.i, %entry ]
+  %add.us.i.i = add i32 %j.026.us.i.i, %mul.us.i118.i
+  %arrayidx4.us.i.i = getelementptr inbounds i16, i16* %A, i32 %add.us.i.i
+  %A7 = load i16, i16* %arrayidx4.us.i.i, align 2
+  %conv.us.i.i = sext i16 %A7 to i32
+  %arrayidx5.us.i.i = getelementptr inbounds i16, i16* %B, i32 %j.026.us.i.i
+  %A8 = load i16, i16* %arrayidx5.us.i.i, align 2
+  %conv6.us.i.i = sext i16 %A8 to i32
+  %mul7.us.i.i = mul nsw i32 %conv6.us.i.i, %conv.us.i.i
+  %add9.us.i.i = add nsw i32 %mul7.us.i.i, %A3
+  %inc.us.i.i = or i32 %j.026.us.i.i, 1
+  %add.us.i.1.i = add i32 %j.026.us.i.i, %mul.us.i118.1.i
+  %arrayidx4.us.i.1.i = getelementptr inbounds i16, i16* %A, i32 %add.us.i.1.i
+  %A9 = load i16, i16* %arrayidx4.us.i.1.i, align 2
+  %conv.us.i.1.i = sext i16 %A9 to i32
+  %arrayidx5.us.i.1.i = getelementptr inbounds i16, i16* %B, i32 %j.026.us.i.i
+  %B0 = load i16, i16* %arrayidx5.us.i.1.i, align 2
+  %conv6.us.i.1.i = sext i16 %B0 to i32
+  %mul7.us.i.1.i = mul nsw i32 %conv6.us.i.1.i, %conv.us.i.1.i
+  %add9.us.i.1.i = add nsw i32 %mul7.us.i.1.i, %A4
+  %inc.us.i.1.i = or i32 %j.026.us.i.i, 1
+  %add.us.i.2.i = add i32 %j.026.us.i.i, %mul.us.i118.2.i
+  %arrayidx4.us.i.2.i = getelementptr inbounds i16, i16* %A, i32 %add.us.i.2.i
+  %B1 = load i16, i16* %arrayidx4.us.i.2.i, align 2
+  %conv.us.i.2.i = sext i16 %B1 to i32
+  %arrayidx5.us.i.2.i = getelementptr inbounds i16, i16* %B, i32 %j.026.us.i.i
+  %B2 = load i16, i16* %arrayidx5.us.i.2.i, align 2
+  %conv6.us.i.2.i = sext i16 %B2 to i32
+  %mul7.us.i.2.i = mul nsw i32 %conv6.us.i.2.i, %conv.us.i.2.i
+  %add9.us.i.2.i = add nsw i32 %mul7.us.i.2.i, %A5
+  %inc.us.i.2.i = or i32 %j.026.us.i.i, 1
+  %add.us.i.3.i = add i32 %j.026.us.i.i, %mul.us.i118.3.i
+  %arrayidx4.us.i.3.i = getelementptr inbounds i16, i16* %A, i32 %add.us.i.3.i
+  %B3 = load i16, i16* %arrayidx4.us.i.3.i, align 2
+  %conv.us.i.3.i = sext i16 %B3 to i32
+  %arrayidx5.us.i.3.i = getelementptr inbounds i16, i16* %B, i32 %j.026.us.i.i
+  %B4 = load i16, i16* %arrayidx5.us.i.3.i, align 2
+  %conv6.us.i.3.i = sext i16 %B4 to i32
+  %mul7.us.i.3.i = mul nsw i32 %conv6.us.i.3.i, %conv.us.i.3.i
+  %add9.us.i.3.i = add nsw i32 %mul7.us.i.3.i, %A6
+  %inc.us.i.3.i = or i32 %j.026.us.i.i, 1
+  %add.us.i.1337.i = add i32 %inc.us.i.i, %mul.us.i118.i
+  %arrayidx4.us.i.1338.i = getelementptr inbounds i16, i16* %A, i32 %add.us.i.1337.i
+  %B5 = load i16, i16* %arrayidx4.us.i.1338.i, align 2
+  %conv.us.i.1339.i = sext i16 %B5 to i32
+  %arrayidx5.us.i.1340.i = getelementptr inbounds i16, i16* %B, i32 %inc.us.i.i
+  %B6 = load i16, i16* %arrayidx5.us.i.1340.i, align 2
+  %conv6.us.i.1341.i = sext i16 %B6 to i32
+  %mul7.us.i.1342.i = mul nsw i32 %conv6.us.i.1341.i, %conv.us.i.1339.i
+  %add9.us.i.1343.i = add nsw i32 %mul7.us.i.1342.i, %add9.us.i.i
+  %inc.us.i.1344.i = or i32 %j.026.us.i.i, 2
+  %add.us.i.1.1.i = add i32 %inc.us.i.1.i, %mul.us.i118.1.i
+  %arrayidx4.us.i.1.1.i = getelementptr inbounds i16, i16* %A, i32 %add.us.i.1.1.i
+  %B7 = load i16, i16* %arrayidx4.us.i.1.1.i, align 2
+  %conv.us.i.1.1.i = sext i16 %B7 to i32
+  %arrayidx5.us.i.1.1.i = getelementptr inbounds i16, i16* %B, i32 %inc.us.i.1.i
+  %B6.dup = load i16, i16* %arrayidx5.us.i.1.1.i, align 2
+  %conv6.us.i.1.1.i = sext i16 %B6.dup to i32
+  %mul7.us.i.1.1.i = mul nsw i32 %conv6.us.i.1.1.i, %conv.us.i.1.1.i
+  %add9.us.i.1.1.i = add nsw i32 %mul7.us.i.1.1.i, %add9.us.i.1.i
+  %inc.us.i.1.1.i = or i32 %j.026.us.i.i, 2
+  %add.us.i.2.1.i = add i32 %inc.us.i.2.i, %mul.us.i118.2.i
+  %arrayidx4.us.i.2.1.i = getelementptr inbounds i16, i16* %A, i32 %add.us.i.2.1.i
+  %B9 = load i16, i16* %arrayidx4.us.i.2.1.i, align 2
+  %conv.us.i.2.1.i = sext i16 %B9 to i32
+  %arrayidx5.us.i.2.1.i = getelementptr inbounds i16, i16* %B, i32 %inc.us.i.2.i
+  %B6.dup.i = load i16, i16* %arrayidx5.us.i.2.1.i, align 2
+  %conv6.us.i.2.1.i = sext i16 %B6.dup.i to i32
+  %mul7.us.i.2.1.i = mul nsw i32 %conv6.us.i.2.1.i, %conv.us.i.2.1.i
+  %add9.us.i.2.1.i = add nsw i32 %mul7.us.i.2.1.i, %add9.us.i.2.i
+  %inc.us.i.2.1.i = or i32 %j.026.us.i.i, 2
+  %add.us.i.3.1.i = add i32 %inc.us.i.3.i, %mul.us.i118.3.i
+  %arrayidx4.us.i.3.1.i = getelementptr inbounds i16, i16* %A, i32 %add.us.i.3.1.i
+  %B11 = load i16, i16* %arrayidx4.us.i.3.1.i, align 2
+  %conv.us.i.3.1.i = sext i16 %B11 to i32
+  %arrayidx5.us.i.3.1.i = getelementptr inbounds i16, i16* %B, i32 %inc.us.i.3.i
+  %B6.dup.i.i = load i16, i16* %arrayidx5.us.i.3.1.i, align 2
+  %conv6.us.i.3.1.i = sext i16 %B6.dup.i.i to i32
+  %mul7.us.i.3.1.i = mul nsw i32 %conv6.us.i.3.1.i, %conv.us.i.3.1.i
+  %add9.us.i.3.1.i = add nsw i32 %mul7.us.i.3.1.i, %add9.us.i.3.i
+  %inc.us.i.3.1.i = or i32 %j.026.us.i.i, 2
+  %add.us.i.2346.i = add i32 %inc.us.i.1344.i, %mul.us.i118.i
+  %arrayidx4.us.i.2347.i = getelementptr inbounds i16, i16* %A, i32 %add.us.i.2346.i
+  %B13 = load i16, i16* %arrayidx4.us.i.2347.i, align 2
+  %conv.us.i.2348.i = sext i16 %B13 to i32
+  %arrayidx5.us.i.2349.i = getelementptr inbounds i16, i16* %B, i32 %inc.us.i.1344.i
+  %B14 = load i16, i16* %arrayidx5.us.i.2349.i, align 2
+  %conv6.us.i.2350.i = sext i16 %B14 to i32
+  %mul7.us.i.2351.i = mul nsw i32 %conv6.us.i.2350.i, %conv.us.i.2348.i
+  %add9.us.i.2352.i = add nsw i32 %mul7.us.i.2351.i, %add9.us.i.1343.i
+  %inc.us.i.2353.i = or i32 %j.026.us.i.i, 3
+  %add.us.i.1.2.i = add i32 %inc.us.i.1.1.i, %mul.us.i118.1.i
+  %arrayidx4.us.i.1.2.i = getelementptr inbounds i16, i16* %A, i32 %add.us.i.1.2.i
+  %B15 = load i16, i16* %arrayidx4.us.i.1.2.i, align 2
+  %conv.us.i.1.2.i = sext i16 %B15 to i32
+  %arrayidx5.us.i.1.2.i = getelementptr inbounds i16, i16* %B, i32 %inc.us.i.1.1.i
+  %B14.dup = load i16, i16* %arrayidx5.us.i.1.2.i, align 2
+  %conv6.us.i.1.2.i = sext i16 %B14.dup to i32
+  %mul7.us.i.1.2.i = mul nsw i32 %conv6.us.i.1.2.i, %conv.us.i.1.2.i
+  %add9.us.i.1.2.i = add nsw i32 %mul7.us.i.1.2.i, %add9.us.i.1.1.i
+  %inc.us.i.1.2.i = or i32 %j.026.us.i.i, 3
+  %add.us.i.2.2.i = add i32 %inc.us.i.2.1.i, %mul.us.i118.2.i
+  %arrayidx4.us.i.2.2.i = getelementptr inbounds i16, i16* %A, i32 %add.us.i.2.2.i
+  %B17 = load i16, i16* %arrayidx4.us.i.2.2.i, align 2
+  %conv.us.i.2.2.i = sext i16 %B17 to i32
+  %arrayidx5.us.i.2.2.i = getelementptr inbounds i16, i16* %B, i32 %inc.us.i.2.1.i
+  %B14.dup.i = load i16, i16* %arrayidx5.us.i.2.2.i, align 2
+  %conv6.us.i.2.2.i = sext i16 %B14.dup.i to i32
+  %mul7.us.i.2.2.i = mul nsw i32 %conv6.us.i.2.2.i, %conv.us.i.2.2.i
+  %add9.us.i.2.2.i = add nsw i32 %mul7.us.i.2.2.i, %add9.us.i.2.1.i
+  %inc.us.i.2.2.i = or i32 %j.026.us.i.i, 3
+  %add.us.i.3.2.i = add i32 %inc.us.i.3.1.i, %mul.us.i118.3.i
+  %arrayidx4.us.i.3.2.i = getelementptr inbounds i16, i16* %A, i32 %add.us.i.3.2.i
+  %B19 = load i16, i16* %arrayidx4.us.i.3.2.i, align 2
+  %conv.us.i.3.2.i = sext i16 %B19 to i32
+  %arrayidx5.us.i.3.2.i = getelementptr inbounds i16, i16* %B, i32 %inc.us.i.3.1.i
+  %B14.dup.i.i = load i16, i16* %arrayidx5.us.i.3.2.i, align 2
+  %conv6.us.i.3.2.i = sext i16 %B14.dup.i.i to i32
+  %mul7.us.i.3.2.i = mul nsw i32 %conv6.us.i.3.2.i, %conv.us.i.3.2.i
+  %add9.us.i.3.2.i = add nsw i32 %mul7.us.i.3.2.i, %add9.us.i.3.1.i
+  %inc.us.i.3.2.i = or i32 %j.026.us.i.i, 3
+  %add.us.i.3355.i = add i32 %inc.us.i.2353.i, %mul.us.i118.i
+  %arrayidx4.us.i.3356.i = getelementptr inbounds i16, i16* %A, i32 %add.us.i.3355.i
+  %B21 = load i16, i16* %arrayidx4.us.i.3356.i, align 2
+  %conv.us.i.3357.i = sext i16 %B21 to i32
+  %arrayidx5.us.i.3358.i = getelementptr inbounds i16, i16* %B, i32 %inc.us.i.2353.i
+  %B22 = load i16, i16* %arrayidx5.us.i.3358.i, align 2
+  %conv6.us.i.3359.i = sext i16 %B22 to i32
+  %mul7.us.i.3360.i = mul nsw i32 %conv6.us.i.3359.i, %conv.us.i.3357.i
+  %add9.us.i.3361.i = add nsw i32 %mul7.us.i.3360.i, %add9.us.i.2352.i
+  %inc.us.i.3362.i = add i32 %j.026.us.i.i, 4
+  %add.us.i.1.3.i = add i32 %inc.us.i.1.2.i, %mul.us.i118.1.i
+  %arrayidx4.us.i.1.3.i = getelementptr inbounds i16, i16* %A, i32 %add.us.i.1.3.i
+  %B23 = load i16, i16* %arrayidx4.us.i.1.3.i, align 2
+  %conv.us.i.1.3.i = sext i16 %B23 to i32
+  %arrayidx5.us.i.1.3.i = getelementptr inbounds i16, i16* %B, i32 %inc.us.i.1.2.i
+  %B22.dup = load i16, i16* %arrayidx5.us.i.1.3.i, align 2
+  %conv6.us.i.1.3.i = sext i16 %B22.dup to i32
+  %mul7.us.i.1.3.i = mul nsw i32 %conv6.us.i.1.3.i, %conv.us.i.1.3.i
+  %add9.us.i.1.3.i = add nsw i32 %mul7.us.i.1.3.i, %add9.us.i.1.2.i
+  %add.us.i.2.3.i = add i32 %inc.us.i.2.2.i, %mul.us.i118.2.i
+  %arrayidx4.us.i.2.3.i = getelementptr inbounds i16, i16* %A, i32 %add.us.i.2.3.i
+  %B25 = load i16, i16* %arrayidx4.us.i.2.3.i, align 2
+  %conv.us.i.2.3.i = sext i16 %B25 to i32
+  %arrayidx5.us.i.2.3.i = getelementptr inbounds i16, i16* %B, i32 %inc.us.i.2.2.i
+  %B22.dup.i = load i16, i16* %arrayidx5.us.i.2.3.i, align 2
+  %conv6.us.i.2.3.i = sext i16 %B22.dup.i to i32
+  %mul7.us.i.2.3.i = mul nsw i32 %conv6.us.i.2.3.i, %conv.us.i.2.3.i
+  %add9.us.i.2.3.i = add nsw i32 %mul7.us.i.2.3.i, %add9.us.i.2.2.i
+  %add.us.i.3.3.i = add i32 %inc.us.i.3.2.i, %mul.us.i118.3.i
+  %arrayidx4.us.i.3.3.i = getelementptr inbounds i16, i16* %A, i32 %add.us.i.3.3.i
+  %B27 = load i16, i16* %arrayidx4.us.i.3.3.i, align 2
+  %conv.us.i.3.3.i = sext i16 %B27 to i32
+  %arrayidx5.us.i.3.3.i = getelementptr inbounds i16, i16* %B, i32 %inc.us.i.3.2.i
+  %B22.dup.i.i = load i16, i16* %arrayidx5.us.i.3.3.i, align 2
+  %conv6.us.i.3.3.i = sext i16 %B22.dup.i.i to i32
+  %mul7.us.i.3.3.i = mul nsw i32 %conv6.us.i.3.3.i, %conv.us.i.3.3.i
+  %add9.us.i.3.3.i = add nsw i32 %mul7.us.i.3.3.i, %add9.us.i.3.2.i
+  %niter335.nsub.3.i = add i32 %niter335.i, -4
+  %niter335.ncmp.3.i = icmp eq i32 %niter335.nsub.3.i, 0
+  br i1 %niter335.ncmp.3.i, label %exit, label %for.body
+
+exit:
+  %arrayidx.out.i = getelementptr inbounds i32, i32* %res, i32 0
+  store i32 %add9.us.i.3361.i, i32* %arrayidx.out.i, align 4
+  %arrayidx.out.1.i = getelementptr inbounds i32, i32* %res, i32 1
+  store i32 %add9.us.i.1.3.i, i32* %arrayidx.out.1.i, align 4
+  %arrayidx.out.2.i = getelementptr inbounds i32, i32* %res, i32 2
+  store i32 %add9.us.i.2.3.i, i32* %arrayidx.out.2.i, align 4
+  %arrayidx.out.3.i = getelementptr inbounds i32, i32* %res, i32 3
+  store i32 %add9.us.i.3.3.i, i32* %arrayidx.out.3.i, align 4
+  ret void
+}