[ARM] Exchange MAC operands in ARMParallelDSP

SMLAD and SMLALD instructions also come in the form of SMLADX and
SMLALDX, which exchange the two halfwords of their second operand. To
support this, more of the loads in the MAC candidates are compared for
sequential access, and an Exchange flag has been added to BinOpChain.
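
Roughly, as a sketch of the instruction semantics (lo/hi denote the
bottom and top halfwords; register names are illustrative):

  // SMLAD  Rd, Rn, Rm, Ra:  Rd = Ra + Rn.lo*Rm.lo + Rn.hi*Rm.hi
  // SMLADX Rd, Rn, Rm, Ra:  Rd = Ra + Rn.lo*Rm.hi + Rn.hi*Rm.lo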

The matching in AddMACCandidate and MatchParallelMACSequences has been
refactored into a small recursive pattern matcher, both to reduce the
amount of duplicated code and to make the matching more flexible.
CreateParallelMACPairs now iterates through all the candidates to find
parallel ones.
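
For example (an illustrative sketch, not lifted from the tests): given
the two candidate muls

  mul0 = a[i]   * b[j];
  mul1 = a[i+1] * b[j-1];

the a-loads are sequential as written, but the b-loads are sequential
only when swapped, so the pair can now be matched by setting Exchange
on the second chain and emitting SMLADX/SMLALDX instead of
SMLAD/SMLALD.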

Differential Revision: https://reviews.llvm.org/D51424

llvm-svn: 342033
diff --git a/llvm/lib/Target/ARM/ARMParallelDSP.cpp b/llvm/lib/Target/ARM/ARMParallelDSP.cpp
index a6a218c..21a0634 100644
--- a/llvm/lib/Target/ARM/ARMParallelDSP.cpp
+++ b/llvm/lib/Target/ARM/ARMParallelDSP.cpp
@@ -92,6 +92,7 @@
   struct BinOpChain : public OpChain {
     ValueList     LHS;      // List of all (narrow) left hand operands.
     ValueList     RHS;      // List of all (narrow) right hand operands.
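+    // Set when the loads of this chain are sequential only in reversed
+    // order, in which case the exchange (X) intrinsic variant is selected.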
+    bool Exchange = false;
 
     BinOpChain(Instruction *I, ValueList &lhs, ValueList &rhs) :
       OpChain(I, lhs), LHS(lhs), RHS(rhs) {
@@ -125,7 +126,8 @@
     bool AreSequentialLoads(LoadInst *Ld0, LoadInst *Ld1, MemInstList &VecMem);
     PMACPairList CreateParallelMACPairs(OpChainList &Candidates);
     Instruction *CreateSMLADCall(LoadInst *VecLd0, LoadInst *VecLd1,
-                                 Instruction *Acc, Instruction *InsertAfter);
+                                 Instruction *Acc, bool Exchange,
+                                 Instruction *InsertAfter);
 
     /// Try to match and generate: SMLAD, SMLADX - Signed Multiply Accumulate
     /// Dual performs two signed 16x16-bit multiplications. It adds the
@@ -198,7 +200,8 @@
       LoopAccessInfo LAI(L, SE, TLI, AA, DT, LI);
       bool Changes = false;
 
-      LLVM_DEBUG(dbgs() << "\n== Parallel DSP pass ==\n\n");
+      LLVM_DEBUG(dbgs() << "\n== Parallel DSP pass ==\n");
+      LLVM_DEBUG(dbgs() << " - " << F.getName() << "\n\n");
       Changes = MatchSMLAD(F);
       return Changes;
     }
@@ -300,6 +303,7 @@
     return false;
   }
   if (isConsecutiveAccess(MemOp0, MemOp1, DL, SE)) {
+    VecMem.clear();
     VecMem.push_back(MemOp0);
     VecMem.push_back(MemOp1);
     LLVM_DEBUG(dbgs() << "OK: accesses are consecutive.\n");
@@ -335,53 +339,90 @@
   if (Elems < 2)
     return PMACPairs;
 
-  // TODO: for now we simply try to match consecutive pairs i and i+1.
-  // We can compare all elements, but then we need to compare and evaluate
-  // different solutions.
-  for(unsigned i=0; i<Elems-1; i+=2) {
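+  // Roots of the mul candidates that have already been paired, ensuring
+  // that each candidate is used at most once.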
+  SmallPtrSet<const Instruction*, 4> Paired;
+  for (unsigned i = 0; i < Elems; ++i) {
     BinOpChain *PMul0 = static_cast<BinOpChain*>(Candidates[i].get());
-    BinOpChain *PMul1 = static_cast<BinOpChain*>(Candidates[i+1].get());
-    const Instruction *Mul0 = PMul0->Root;
-    const Instruction *Mul1 = PMul1->Root;
-
-    if (Mul0 == Mul1)
+    if (Paired.count(PMul0->Root))
       continue;
 
-    LLVM_DEBUG(dbgs() << "\nCheck parallel muls:\n";
-               dbgs() << "- "; Mul0->dump();
-               dbgs() << "- "; Mul1->dump());
+    for (unsigned j = 0; j < Elems; ++j) {
+      if (i == j)
+        continue;
 
-    const ValueList &Mul0_LHS = PMul0->LHS;
-    const ValueList &Mul0_RHS = PMul0->RHS;
-    const ValueList &Mul1_LHS = PMul1->LHS;
-    const ValueList &Mul1_RHS = PMul1->RHS;
+      BinOpChain *PMul1 = static_cast<BinOpChain*>(Candidates[j].get());
+      if (Paired.count(PMul1->Root))
+        continue;
 
-    if (!AreSymmetrical(Mul0_LHS, Mul1_LHS) ||
-        !AreSymmetrical(Mul0_RHS, Mul1_RHS))
-      continue;
+      const Instruction *Mul0 = PMul0->Root;
+      const Instruction *Mul1 = PMul1->Root;
+      if (Mul0 == Mul1)
+        continue;
 
-    LLVM_DEBUG(dbgs() << "OK: mul operands list match:\n");
-    // The first elements of each vector should be loads with sexts. If we find
-    // that its two pairs of consecutive loads, then these can be transformed
-    // into two wider loads and the users can be replaced with DSP
-    // intrinsics.
-    for (unsigned x = 0; x < Mul0_LHS.size(); x += 2) {
-      auto *Ld0 = dyn_cast<LoadInst>(Mul0_LHS[x]);
-      auto *Ld1 = dyn_cast<LoadInst>(Mul1_LHS[x]);
-      auto *Ld2 = dyn_cast<LoadInst>(Mul0_RHS[x]);
-      auto *Ld3 = dyn_cast<LoadInst>(Mul1_RHS[x]);
+      assert(PMul0 != PMul1 && "expected different chains");
 
-      LLVM_DEBUG(dbgs() << "Looking at operands " << x << ":\n";
-                 dbgs() << "\t mul1: "; Mul0_LHS[x]->dump();
-                 dbgs() << "\t mul2: "; Mul1_LHS[x]->dump();
-                 dbgs() << "and operands " << x + 2 << ":\n";
-                 dbgs() << "\t mul1: "; Mul0_RHS[x]->dump();
-                 dbgs() << "\t mul2: "; Mul1_RHS[x]->dump());
+      LLVM_DEBUG(dbgs() << "\nCheck parallel muls:\n";
+                 dbgs() << "- "; Mul0->dump();
+                 dbgs() << "- "; Mul1->dump());
 
-      if (AreSequentialLoads(Ld0, Ld1, PMul0->VecLd) &&
-          AreSequentialLoads(Ld2, Ld3, PMul1->VecLd)) {
-        LLVM_DEBUG(dbgs() << "OK: found two pairs of parallel loads!\n");
-        PMACPairs.push_back(std::make_pair(PMul0, PMul1));
+      const ValueList &Mul0_LHS = PMul0->LHS;
+      const ValueList &Mul0_RHS = PMul0->RHS;
+      const ValueList &Mul1_LHS = PMul1->LHS;
+      const ValueList &Mul1_RHS = PMul1->RHS;
+
+      if (!AreSymmetrical(Mul0_LHS, Mul1_LHS) ||
+          !AreSymmetrical(Mul0_RHS, Mul1_RHS))
+        continue;
+
+      LLVM_DEBUG(dbgs() << "OK: mul operands list match:\n");
+      // The first elements of each vector should be loads with sexts. If we
+      // find two pairs of consecutive loads, then these can be transformed
+      // into two wider loads and the users can be replaced with DSP
+      // intrinsics.
+      bool Found = false;
+      for (unsigned x = 0; x < Mul0_LHS.size(); x += 2) {
+        auto *Ld0 = dyn_cast<LoadInst>(Mul0_LHS[x]);
+        auto *Ld1 = dyn_cast<LoadInst>(Mul1_LHS[x]);
+        auto *Ld2 = dyn_cast<LoadInst>(Mul0_RHS[x]);
+        auto *Ld3 = dyn_cast<LoadInst>(Mul1_RHS[x]);
+
+        if (!Ld0 || !Ld1 || !Ld2 || !Ld3)
+          continue;
+
+        LLVM_DEBUG(dbgs() << "Looking at LHS operands " << x << ":\n"
+                   << "\t Ld0: " << *Ld0 << "\n"
+                   << "\t Ld1: " << *Ld1 << "\n"
+                   << "and RHS operands " << x << ":\n"
+                   << "\t Ld2: " << *Ld2 << "\n"
+                   << "\t Ld3: " << *Ld3 << "\n");
+
+        if (AreSequentialLoads(Ld0, Ld1, PMul0->VecLd)) {
+          if (AreSequentialLoads(Ld2, Ld3, PMul1->VecLd)) {
+            LLVM_DEBUG(dbgs() << "OK: found two pairs of parallel loads!\n");
+            PMACPairs.push_back(std::make_pair(PMul0, PMul1));
+            Found = true;
+          } else if (AreSequentialLoads(Ld3, Ld2, PMul1->VecLd)) {
+            LLVM_DEBUG(dbgs() << "OK: found two pairs of parallel loads!\n");
+            LLVM_DEBUG(dbgs() << "    exchanging Ld2 and Ld3\n");
+            PMul1->Exchange = true;
+            PMACPairs.push_back(std::make_pair(PMul0, PMul1));
+            Found = true;
+          }
+        } else if (AreSequentialLoads(Ld1, Ld0, PMul0->VecLd)) {
+          if (AreSequentialLoads(Ld2, Ld3, PMul1->VecLd)) {
+            LLVM_DEBUG(dbgs() << "OK: found two pairs of parallel loads!\n");
+            LLVM_DEBUG(dbgs() << "    exchanging Ld0 and Ld1\n");
+            LLVM_DEBUG(dbgs() << "    and swapping muls\n");
+            PMul0->Exchange = true;
+            // Only the second operand can be exchanged, so swap the muls.
+            PMACPairs.push_back(std::make_pair(PMul1, PMul0));
+            Found = true;
+          }
+        }
+      }
+      if (Found) {
+        Paired.insert(Mul0);
+        Paired.insert(Mul1);
+        break;
       }
     }
   }
@@ -394,12 +435,15 @@
   Instruction *InsertAfter = Reduction.AccIntAdd;
 
   for (auto &Pair : PMACPairs) {
+    BinOpChain *PMul0 = Pair.first;
+    BinOpChain *PMul1 = Pair.second;
     LLVM_DEBUG(dbgs() << "Found parallel MACs!!\n";
-               dbgs() << "- "; Pair.first->Root->dump();
-               dbgs() << "- "; Pair.second->Root->dump());
-    auto *VecLd0 = cast<LoadInst>(Pair.first->VecLd[0]);
-    auto *VecLd1 = cast<LoadInst>(Pair.second->VecLd[0]);
-    Acc = CreateSMLADCall(VecLd0, VecLd1, Acc, InsertAfter);
+               dbgs() << "- "; PMul0->Root->dump();
+               dbgs() << "- "; PMul1->Root->dump());
+
+    auto *VecLd0 = cast<LoadInst>(PMul0->VecLd[0]);
+    auto *VecLd1 = cast<LoadInst>(PMul1->VecLd[0]);
+    Acc = CreateSMLADCall(VecLd0, VecLd1, Acc, PMul1->Exchange, InsertAfter);
     InsertAfter = Acc;
   }
 
@@ -453,7 +497,6 @@
 }
 
 static void AddMACCandidate(OpChainList &Candidates,
-                            const Instruction *Acc,
                             Instruction *Mul,
                             Value *MulOp0, Value *MulOp1) {
   LLVM_DEBUG(dbgs() << "OK, found acc mul:\t"; Mul->dump());
@@ -470,55 +513,44 @@
 
 static void MatchParallelMACSequences(Reduction &R,
                                       OpChainList &Candidates) {
-  const Instruction *Acc = R.AccIntAdd;
-  Value *A, *MulOp0, *MulOp1;
-  LLVM_DEBUG(dbgs() << "\n- Analysing:\t"; Acc->dump());
+  Instruction *Acc = R.AccIntAdd;
+  LLVM_DEBUG(dbgs() << "\n- Analysing:\t" << *Acc);
 
-  // Pattern 1: the accumulator is the RHS of the mul.
-  while(match(Acc, m_Add(m_Mul(m_Value(MulOp0), m_Value(MulOp1)),
-                         m_Value(A)))){
-    Instruction *Mul = cast<Instruction>(Acc->getOperand(0));
-    AddMACCandidate(Candidates, Acc, Mul, MulOp0, MulOp1);
-    Acc = dyn_cast<Instruction>(A);
-  }
-  // Pattern 2: the accumulator is the LHS of the mul.
-  while(match(Acc, m_Add(m_Value(A),
-                         m_Mul(m_Value(MulOp0), m_Value(MulOp1))))) {
-    Instruction *Mul = cast<Instruction>(Acc->getOperand(1));
-    AddMACCandidate(Candidates, Acc, Mul, MulOp0, MulOp1);
-    Acc = dyn_cast<Instruction>(A);
-  }
+  // Recursively walk from the accumulating adds down to the (possibly
+  // sign-extended) muls, collecting each mul as a candidate. Returns false
+  // to signal the search should be stopped.
+  std::function<bool(Value*)> Match =
+    [&Candidates, &Match](Value *V) -> bool {
 
-  // The last mul in the chain has a slightly different pattern:
-  // the mul is the first operand
-  if (match(Acc, m_Add(m_Mul(m_Value(MulOp0), m_Value(MulOp1)), m_Value(A))))
-    AddMACCandidate(Candidates, Acc, cast<Instruction>(Acc->getOperand(0)),
-                    MulOp0, MulOp1);
+    auto *I = dyn_cast<Instruction>(V);
+    if (!I)
+      return false;
 
-  // Same as above, but SMLALD may perform 32-bit muls, sext the results and
-  // then accumulate.
-  while(match(Acc, m_Add(m_SExt(m_Mul(m_Value(MulOp0), m_Value(MulOp1))),
-                        m_Value(A)))) {
-    Value *Mul = cast<Instruction>(Acc->getOperand(0))->getOperand(0);
-    AddMACCandidate(Candidates, Acc, cast<Instruction>(Mul), MulOp0, MulOp1);
-    Acc = dyn_cast<Instruction>(A);
-  }
-  while(match(Acc, m_Add(m_Value(A),
-                         m_SExt(m_Mul(m_Value(MulOp0), m_Value(MulOp1)))))) {
-    Value *Mul = cast<Instruction>(Acc->getOperand(1))->getOperand(0);
-    AddMACCandidate(Candidates, Acc, cast<Instruction>(Mul), MulOp0, MulOp1);
-    Acc = dyn_cast<Instruction>(A);
-  }
-  if (match(Acc, m_Add(m_SExt(m_Mul(m_Value(MulOp0), m_Value(MulOp1))),
-                       m_Value(A)))) {
-    Value *Mul = cast<Instruction>(
-      cast<Instruction>(Acc)->getOperand(0))->getOperand(0);
-    AddMACCandidate(Candidates, Acc, cast<Instruction>(Mul), MulOp0, MulOp1);
-  }
+    Value *MulOp0, *MulOp1;
 
-  // Because we start at the bottom of the chain, and we work our way up,
-  // the muls are added in reverse program order to the list.
-  std::reverse(Candidates.begin(), Candidates.end());
+    switch (I->getOpcode()) {
+    case Instruction::Add:
+      if (Match(I->getOperand(0)) || Match(I->getOperand(1)))
+        return true;
+      break;
+    case Instruction::Mul:
+      if (match(I, m_Mul(m_Value(MulOp0), m_Value(MulOp1)))) {
+        AddMACCandidate(Candidates, I, MulOp0, MulOp1);
+        return false;
+      }
+      break;
+    case Instruction::SExt:
+      if (match(I, m_SExt(m_Mul(m_Value(MulOp0), m_Value(MulOp1))))) {
+        Instruction *Mul = cast<Instruction>(I->getOperand(0));
+        AddMACCandidate(Candidates, Mul, MulOp0, MulOp1);
+        return false;
+      }
+      break;
+    }
+    return false;
+  };
+
+  while (Match(Acc));
+  LLVM_DEBUG(dbgs() << "Finished matching MAC sequences, found "
+             << Candidates.size() << " candidates.\n");
 }
 
 // Collects all instructions that are not part of the MAC chains, which is the
@@ -661,34 +693,41 @@
   return Changed;
 }
 
-static void CreateLoadIns(IRBuilder<NoFolder> &IRB, LoadInst **VecLd,
-                          const Type *LoadTy) {
-  const unsigned AddrSpace = (*VecLd)->getPointerAddressSpace();
+static LoadInst *CreateLoadIns(IRBuilder<NoFolder> &IRB, LoadInst &BaseLoad,
+                               const Type *LoadTy) {
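+  // Cast the pointer of the first scalar load to the wide type and emit a
+  // single load that covers both consecutive values.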
+  const unsigned AddrSpace = BaseLoad.getPointerAddressSpace();
 
-  Value *VecPtr = IRB.CreateBitCast((*VecLd)->getPointerOperand(),
+  Value *VecPtr = IRB.CreateBitCast(BaseLoad.getPointerOperand(),
                                     LoadTy->getPointerTo(AddrSpace));
-  *VecLd = IRB.CreateAlignedLoad(VecPtr, (*VecLd)->getAlignment());
+  return IRB.CreateAlignedLoad(VecPtr, BaseLoad.getAlignment());
 }
 
 Instruction *ARMParallelDSP::CreateSMLADCall(LoadInst *VecLd0, LoadInst *VecLd1,
-                                             Instruction *Acc,
+                                             Instruction *Acc, bool Exchange,
                                              Instruction *InsertAfter) {
-  LLVM_DEBUG(dbgs() << "Create SMLAD intrinsic using:\n";
-             dbgs() << "- "; VecLd0->dump();
-             dbgs() << "- "; VecLd1->dump();
-             dbgs() << "- "; Acc->dump());
+  LLVM_DEBUG(dbgs() << "Create SMLAD intrinsic using:\n"
+             << "- " << *VecLd0 << "\n"
+             << "- " << *VecLd1 << "\n"
+             << "- " << *Acc << "\n"
+             << "Exchange: " << Exchange << "\n");
 
   IRBuilder<NoFolder> Builder(InsertAfter->getParent(),
                               ++BasicBlock::iterator(InsertAfter));
 
   // Replace the reduction chain with an intrinsic call
   const Type *Ty = IntegerType::get(M->getContext(), 32);
-  CreateLoadIns(Builder, &VecLd0, Ty);
-  CreateLoadIns(Builder, &VecLd1, Ty);
-  Value* Args[] = { VecLd0, VecLd1, Acc };
-  Function *SMLAD = Acc->getType()->isIntegerTy(32) ?
-    Intrinsic::getDeclaration(M, Intrinsic::arm_smlad) :
-    Intrinsic::getDeclaration(M, Intrinsic::arm_smlald);
+  LoadInst *NewLd0 = CreateLoadIns(Builder, *VecLd0, Ty);
+  LoadInst *NewLd1 = CreateLoadIns(Builder, *VecLd1, Ty);
+  Value *Args[] = { NewLd0, NewLd1, Acc };
+  Function *SMLAD = nullptr;
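+  // Select the exchange (X) variant when the second operand pair was found
+  // in reversed order; a 64-bit accumulator selects the SMLALD forms.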
+  if (Exchange)
+    SMLAD = Acc->getType()->isIntegerTy(32) ?
+      Intrinsic::getDeclaration(M, Intrinsic::arm_smladx) :
+      Intrinsic::getDeclaration(M, Intrinsic::arm_smlaldx);
+  else
+    SMLAD = Acc->getType()->isIntegerTy(32) ?
+      Intrinsic::getDeclaration(M, Intrinsic::arm_smlad) :
+      Intrinsic::getDeclaration(M, Intrinsic::arm_smlald);
   CallInst *Call = Builder.CreateCall(SMLAD, Args);
   NumSMLAD++;
   return Call;
diff --git a/llvm/test/CodeGen/ARM/smladx-1.ll b/llvm/test/CodeGen/ARM/smladx-1.ll
new file mode 100644
index 0000000..d5e9a06
--- /dev/null
+++ b/llvm/test/CodeGen/ARM/smladx-1.ll
@@ -0,0 +1,240 @@
+; RUN: opt -mtriple=thumbv8m.main -mcpu=cortex-m33 -arm-parallel-dsp %s -S -o - | FileCheck %s
+; RUN: opt -mtriple=arm-arm-eabi -mcpu=cortex-m0 < %s -arm-parallel-dsp -S | FileCheck %s --check-prefix=CHECK-UNSUPPORTED
+; RUN: opt -mtriple=arm-arm-eabi -mcpu=cortex-m33 -mattr=-dsp < %s -arm-parallel-dsp -S | FileCheck %s --check-prefix=CHECK-UNSUPPORTED
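+
+; In both loops below, the pIn2 loads walk backwards through memory while the
+; pIn1 loads walk forwards, so the pass can only pair the loads by exchanging
+; the operands of one mul in each pair, i.e. by selecting smladx over smlad.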
+
+define i32 @smladx(i16* nocapture readonly %pIn1, i16* nocapture readonly %pIn2, i32 %j, i32 %limit) {
+
+; CHECK-LABEL: smladx
+; CHECK: = phi i32 [ 0, %for.body.preheader.new ],
+; CHECK: [[ACC0:%[^ ]+]] = phi i32 [ 0, %for.body.preheader.new ], [ [[ACC2:%[^ ]+]], %for.body ]
+; CHECK: [[PIN23:%[^ ]+]] = bitcast i16* %pIn2.3 to i32*
+; CHECK: [[IN23:%[^ ]+]] = load i32, i32* [[PIN23]], align 2
+; CHECK: [[PIN12:%[^ ]+]] = bitcast i16* %pIn1.2 to i32*
+; CHECK: [[IN12:%[^ ]+]] = load i32, i32* [[PIN12]], align 2
+; CHECK: [[ACC1:%[^ ]+]] = call i32 @llvm.arm.smladx(i32 [[IN23]], i32 [[IN12]], i32 [[ACC0]])
+; CHECK: [[PIN21:%[^ ]+]] = bitcast i16* %pIn2.1 to i32*
+; CHECK: [[IN21:%[^ ]+]] = load i32, i32* [[PIN21]], align 2
+; CHECK: [[PIN10:%[^ ]+]] = bitcast i16* %pIn1.0 to i32*
+; CHECK: [[IN10:%[^ ]+]] = load i32, i32* [[PIN10]], align 2
+; CHECK: [[ACC2]] = call i32 @llvm.arm.smladx(i32 [[IN21]], i32 [[IN10]], i32 [[ACC1]])
+; CHECK-NOT: call i32 @llvm.arm.smlad
+; CHECK-UNSUPPORTED-NOT:  call i32 @llvm.arm.smlad
+
+entry:
+  %cmp9 = icmp eq i32 %limit, 0
+  br i1 %cmp9, label %for.cond.cleanup, label %for.body.preheader
+
+for.body.preheader:
+  %0 = add i32 %limit, -1
+  %xtraiter = and i32 %limit, 3
+  %1 = icmp ult i32 %0, 3
+  br i1 %1, label %for.cond.cleanup.loopexit.unr-lcssa, label %for.body.preheader.new
+
+for.body.preheader.new:
+  %unroll_iter = sub i32 %limit, %xtraiter
+  br label %for.body
+
+for.cond.cleanup.loopexit.unr-lcssa:
+  %add.lcssa.ph = phi i32 [ undef, %for.body.preheader ], [ %add.3, %for.body ]
+  %i.011.unr = phi i32 [ 0, %for.body.preheader ], [ %inc.3, %for.body ]
+  %sum.010.unr = phi i32 [ 0, %for.body.preheader ], [ %add.3, %for.body ]
+  %lcmp.mod = icmp eq i32 %xtraiter, 0
+  br i1 %lcmp.mod, label %for.cond.cleanup, label %for.body.epil
+
+for.body.epil:
+  %i.011.epil = phi i32 [ %inc.epil, %for.body.epil ], [ %i.011.unr, %for.cond.cleanup.loopexit.unr-lcssa ]
+  %sum.010.epil = phi i32 [ %add.epil, %for.body.epil ], [ %sum.010.unr, %for.cond.cleanup.loopexit.unr-lcssa ]
+  %epil.iter = phi i32 [ %epil.iter.sub, %for.body.epil ], [ %xtraiter, %for.cond.cleanup.loopexit.unr-lcssa ]
+  %sub.epil = sub i32 %j, %i.011.epil
+  %arrayidx.epil = getelementptr inbounds i16, i16* %pIn2, i32 %sub.epil
+  %2 = load i16, i16* %arrayidx.epil, align 2
+  %conv.epil = sext i16 %2 to i32
+  %arrayidx1.epil = getelementptr inbounds i16, i16* %pIn1, i32 %i.011.epil
+  %3 = load i16, i16* %arrayidx1.epil, align 2
+  %conv2.epil = sext i16 %3 to i32
+  %mul.epil = mul nsw i32 %conv2.epil, %conv.epil
+  %add.epil = add nsw i32 %mul.epil, %sum.010.epil
+  %inc.epil = add nuw i32 %i.011.epil, 1
+  %epil.iter.sub = add i32 %epil.iter, -1
+  %epil.iter.cmp = icmp eq i32 %epil.iter.sub, 0
+  br i1 %epil.iter.cmp, label %for.cond.cleanup, label %for.body.epil
+
+for.cond.cleanup:
+  %sum.0.lcssa = phi i32 [ 0, %entry ], [ %add.lcssa.ph, %for.cond.cleanup.loopexit.unr-lcssa ], [ %add.epil, %for.body.epil ]
+  ret i32 %sum.0.lcssa
+
+for.body:
+  %i.011 = phi i32 [ 0, %for.body.preheader.new ], [ %inc.3, %for.body ]
+  %sum.010 = phi i32 [ 0, %for.body.preheader.new ], [ %add.3, %for.body ]
+  %niter = phi i32 [ %unroll_iter, %for.body.preheader.new ], [ %niter.nsub.3, %for.body ]
+  %pIn2Base = phi i16* [ %pIn2, %for.body.preheader.new ], [ %pIn2.4, %for.body ]
+  %pIn2.0 = getelementptr inbounds i16, i16* %pIn2Base, i32 0
+  %In2 = load i16, i16* %pIn2.0, align 2
+  %pIn1.0 = getelementptr inbounds i16, i16* %pIn1, i32 %i.011
+  %In1 = load i16, i16* %pIn1.0, align 2
+  %inc = or i32 %i.011, 1
+  %pIn2.1 = getelementptr inbounds i16, i16* %pIn2Base, i32 -1
+  %In2.1 = load i16, i16* %pIn2.1, align 2
+  %pIn1.1 = getelementptr inbounds i16, i16* %pIn1, i32 %inc
+  %In1.1 = load i16, i16* %pIn1.1, align 2
+  %inc.1 = or i32 %i.011, 2
+  %pIn2.2 = getelementptr inbounds i16, i16* %pIn2Base, i32 -2
+  %In2.2 = load i16, i16* %pIn2.2, align 2
+  %pIn1.2 = getelementptr inbounds i16, i16* %pIn1, i32 %inc.1
+  %In1.2 = load i16, i16* %pIn1.2, align 2
+  %inc.2 = or i32 %i.011, 3
+  %pIn2.3 = getelementptr inbounds i16, i16* %pIn2Base, i32 -3
+  %In2.3 = load i16, i16* %pIn2.3, align 2
+  %pIn1.3 = getelementptr inbounds i16, i16* %pIn1, i32 %inc.2
+  %In1.3 = load i16, i16* %pIn1.3, align 2
+  %sextIn1 = sext i16 %In1 to i32
+  %sextIn1.1 = sext i16 %In1.1 to i32
+  %sextIn1.2 = sext i16 %In1.2 to i32
+  %sextIn1.3 = sext i16 %In1.3 to i32
+  %sextIn2 = sext i16 %In2 to i32
+  %sextIn2.1 = sext i16 %In2.1 to i32
+  %sextIn2.2 = sext i16 %In2.2 to i32
+  %sextIn2.3 = sext i16 %In2.3 to i32
+  %mul = mul nsw i32 %sextIn1, %sextIn2
+  %mul.1 = mul nsw i32 %sextIn1.1, %sextIn2.1
+  %mul.2 = mul nsw i32 %sextIn1.2, %sextIn2.2
+  %mul.3 = mul nsw i32 %sextIn1.3, %sextIn2.3
+  %add = add nsw i32 %mul, %sum.010
+  %add.1 = add nsw i32 %mul.1, %add
+  %add.2 = add nsw i32 %mul.2, %add.1
+  %add.3 = add nsw i32 %mul.3, %add.2
+  %inc.3 = add i32 %i.011, 4
+  %pIn2.4 = getelementptr inbounds i16, i16* %pIn2Base, i32 -4
+  %niter.nsub.3 = add i32 %niter, -4
+  %niter.ncmp.3 = icmp eq i32 %niter.nsub.3, 0
+  br i1 %niter.ncmp.3, label %for.cond.cleanup.loopexit.unr-lcssa, label %for.body
+}
+
+define i32 @smladx_swap(i16* nocapture readonly %pIn1, i16* nocapture readonly %pIn2, i32 %j, i32 %limit) {
+
+; CHECK-LABEL: smladx_swap
+; CHECK: for.body.preheader.new:
+; CHECK: [[PIN1Base:[^ ]+]] = getelementptr i16, i16* %pIn1
+; CHECK: [[PIN2Base:[^ ]+]] = getelementptr i16, i16* %pIn2
+
+; CHECK: for.body:
+; CHECK: [[PIN2:%[^ ]+]] = phi i16* [ [[PIN2_NEXT:%[^ ]+]], %for.body ], [ [[PIN2Base]], %for.body.preheader.new ]
+; CHECK: [[PIN1:%[^ ]+]] = phi i16* [ [[PIN1_NEXT:%[^ ]+]], %for.body ], [ [[PIN1Base]], %for.body.preheader.new ]
+; CHECK: [[IV:%[^ ]+]] = phi i32
+; CHECK: [[ACC0:%[^ ]+]] = phi i32 [ 0, %for.body.preheader.new ], [ [[ACC2:%[^ ]+]], %for.body ]
+
+; CHECK: [[PIN1_2:%[^ ]+]] = getelementptr i16, i16* [[PIN1]], i32 -2
+; CHECK: [[PIN2_2:%[^ ]+]] = getelementptr i16, i16* [[PIN2]], i32 -2
+
+
+; CHECK: [[PIN2_2_CAST:%[^ ]+]] = bitcast i16* [[PIN2_2]] to i32*
+; CHECK: [[IN2_2:%[^ ]+]] = load i32, i32* [[PIN2_2_CAST]], align 2
+; CHECK: [[PIN1_CAST:%[^ ]+]] = bitcast i16* [[PIN1]] to i32*
+; CHECK: [[IN1:%[^ ]+]] = load i32, i32* [[PIN1_CAST]], align 2
+; CHECK: [[ACC1:%[^ ]+]] = call i32 @llvm.arm.smladx(i32 [[IN2_2]], i32 [[IN1]], i32 [[ACC0]])
+
+; CHECK: [[PIN2_CAST:%[^ ]+]] = bitcast i16* [[PIN2]] to i32*
+; CHECK: [[IN2:%[^ ]+]] = load i32, i32* [[PIN2_CAST]], align 2
+; CHECK: [[PIN1_2_CAST:%[^ ]+]] = bitcast i16* [[PIN1_2]] to i32*
+; CHECK: [[IN1_2:%[^ ]+]] = load i32, i32* [[PIN1_2_CAST]], align 2
+; CHECK: [[ACC2]] = call i32 @llvm.arm.smladx(i32 [[IN2]], i32 [[IN1_2]], i32 [[ACC1]])
+
+; CHECK: [[PIN1_NEXT]] = getelementptr i16, i16* [[PIN1]], i32 4
+; CHECK: [[PIN2_NEXT]] = getelementptr i16, i16* [[PIN2]], i32 -4
+
+; CHECK-NOT: call i32 @llvm.arm.smlad
+; CHECK-UNSUPPORTED-NOT:  call i32 @llvm.arm.smlad
+
+entry:
+  %cmp9 = icmp eq i32 %limit, 0
+  br i1 %cmp9, label %for.cond.cleanup, label %for.body.preheader
+
+for.body.preheader:
+  %0 = add i32 %limit, -1
+  %xtraiter = and i32 %limit, 3
+  %1 = icmp ult i32 %0, 3
+  br i1 %1, label %for.cond.cleanup.loopexit.unr-lcssa, label %for.body.preheader.new
+
+for.body.preheader.new:
+  %unroll_iter = sub i32 %limit, %xtraiter
+  %scevgep6 = getelementptr i16, i16* %pIn1, i32 2
+  %2 = add i32 %j, -1
+  %scevgep11 = getelementptr i16, i16* %pIn2, i32 %2
+  br label %for.body
+
+for.cond.cleanup.loopexit.unr-lcssa:
+  %add.lcssa.ph = phi i32 [ undef, %for.body.preheader ], [ %add.3, %for.body ]
+  %i.011.unr = phi i32 [ 0, %for.body.preheader ], [ %inc.3, %for.body ]
+  %sum.010.unr = phi i32 [ 0, %for.body.preheader ], [ %add.3, %for.body ]
+  %lcmp.mod = icmp eq i32 %xtraiter, 0
+  br i1 %lcmp.mod, label %for.cond.cleanup, label %for.body.epil.preheader
+
+for.body.epil.preheader:
+  %scevgep = getelementptr i16, i16* %pIn1, i32 %i.011.unr
+  %3 = sub i32 %j, %i.011.unr
+  %scevgep2 = getelementptr i16, i16* %pIn2, i32 %3
+  %4 = sub i32 0, %xtraiter
+  br label %for.body.epil
+
+for.body.epil:
+  %lsr.iv5 = phi i32 [ %4, %for.body.epil.preheader ], [ %lsr.iv.next, %for.body.epil ]
+  %lsr.iv3 = phi i16* [ %scevgep2, %for.body.epil.preheader ], [ %scevgep4, %for.body.epil ]
+  %lsr.iv = phi i16* [ %scevgep, %for.body.epil.preheader ], [ %scevgep1, %for.body.epil ]
+  %sum.010.epil = phi i32 [ %add.epil, %for.body.epil ], [ %sum.010.unr, %for.body.epil.preheader ]
+  %5 = load i16, i16* %lsr.iv3, align 2
+  %conv.epil = sext i16 %5 to i32
+  %6 = load i16, i16* %lsr.iv, align 2
+  %conv2.epil = sext i16 %6 to i32
+  %mul.epil = mul nsw i32 %conv2.epil, %conv.epil
+  %add.epil = add nsw i32 %mul.epil, %sum.010.epil
+  %scevgep1 = getelementptr i16, i16* %lsr.iv, i32 1
+  %scevgep4 = getelementptr i16, i16* %lsr.iv3, i32 -1
+  %lsr.iv.next = add nsw i32 %lsr.iv5, 1
+  %epil.iter.cmp = icmp eq i32 %lsr.iv.next, 0
+  br i1 %epil.iter.cmp, label %for.cond.cleanup, label %for.body.epil
+
+for.cond.cleanup:
+  %sum.0.lcssa = phi i32 [ 0, %entry ], [ %add.lcssa.ph, %for.cond.cleanup.loopexit.unr-lcssa ], [ %add.epil, %for.body.epil ]
+  ret i32 %sum.0.lcssa
+
+for.body:
+  %pin2 = phi i16* [ %pin2_sub4, %for.body ], [ %scevgep11, %for.body.preheader.new ]
+  %pin1 = phi i16* [ %pin1_add4, %for.body ], [ %scevgep6, %for.body.preheader.new ]
+  %i.011 = phi i32 [ 0, %for.body.preheader.new ], [ %inc.3, %for.body ]
+  %sum.010 = phi i32 [ 0, %for.body.preheader.new ], [ %add.3, %for.body ]
+  %pin2_add1 = getelementptr i16, i16* %pin2, i32 1
+  %In2 = load i16, i16* %pin2_add1, align 2
+  %pin1_sub2 = getelementptr i16, i16* %pin1, i32 -2
+  %In1 = load i16, i16* %pin1_sub2, align 2
+  %In2.1 = load i16, i16* %pin2, align 2
+  %pin1_sub1 = getelementptr i16, i16* %pin1, i32 -1
+  %In1.1 = load i16, i16* %pin1_sub1, align 2
+  %pin2_sub1 = getelementptr i16, i16* %pin2, i32 -1
+  %In2.2 = load i16, i16* %pin2_sub1, align 2
+  %In1.2 = load i16, i16* %pin1, align 2
+  %pin2_sub2 = getelementptr i16, i16* %pin2, i32 -2
+  %In2.3 = load i16, i16* %pin2_sub2, align 2
+  %pin1_add1 = getelementptr i16, i16* %pin1, i32 1
+  %In1.3 = load i16, i16* %pin1_add1, align 2
+  %sextIn2 = sext i16 %In2 to i32
+  %sextIn1 = sext i16 %In1 to i32
+  %sextIn2.1 = sext i16 %In2.1 to i32
+  %sextIn1.1 = sext i16 %In1.1 to i32
+  %sextIn2.2 = sext i16 %In2.2 to i32
+  %sextIn1.2 = sext i16 %In1.2 to i32
+  %sextIn2.3 = sext i16 %In2.3 to i32
+  %sextIn1.3 = sext i16 %In1.3 to i32
+  %mul = mul nsw i32 %sextIn2, %sextIn1
+  %add = add nsw i32 %mul, %sum.010
+  %mul.1 = mul nsw i32 %sextIn2.1, %sextIn1.1
+  %add.1 = add nsw i32 %mul.1, %add
+  %mul.2 = mul nsw i32 %sextIn2.2, %sextIn1.2
+  %add.2 = add nsw i32 %mul.2, %add.1
+  %mul.3 = mul nsw i32 %sextIn2.3, %sextIn1.3
+  %add.3 = add nsw i32 %mul.3, %add.2
+  %inc.3 = add i32 %i.011, 4
+  %pin1_add4 = getelementptr i16, i16* %pin1, i32 4
+  %pin2_sub4 = getelementptr i16, i16* %pin2, i32 -4
+  %niter.ncmp.3 = icmp eq i32 %unroll_iter, %inc.3
+  br i1 %niter.ncmp.3, label %for.cond.cleanup.loopexit.unr-lcssa, label %for.body
+}
diff --git a/llvm/test/CodeGen/ARM/smlaldx-1.ll b/llvm/test/CodeGen/ARM/smlaldx-1.ll
new file mode 100644
index 0000000..e615f20
--- /dev/null
+++ b/llvm/test/CodeGen/ARM/smlaldx-1.ll
@@ -0,0 +1,249 @@
+; RUN: opt -mtriple=thumbv8m.main -mcpu=cortex-m33 -arm-parallel-dsp %s -S -o - | FileCheck %s
+; RUN: opt -mtriple=arm-arm-eabi -mcpu=cortex-m0 < %s -arm-parallel-dsp -S | FileCheck %s --check-prefix=CHECK-UNSUPPORTED
+; RUN: opt -mtriple=arm-arm-eabi -mcpu=cortex-m33 -mattr=-dsp < %s -arm-parallel-dsp -S | FileCheck %s --check-prefix=CHECK-UNSUPPORTED
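+
+; As in smladx-1.ll, the pIn2 loads walk backwards while the pIn1 loads walk
+; forwards, but the products here are accumulated into an i64, so the pass
+; should select smlaldx rather than smlald.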
+
+define i64 @smlaldx(i16* nocapture readonly %pIn1, i16* nocapture readonly %pIn2, i32 %j, i32 %limit) {
+
+; CHECK-LABEL: smlaldx
+; CHECK: = phi i32 [ 0, %for.body.preheader.new ],
+; CHECK: [[ACC0:%[^ ]+]] = phi i64 [ 0, %for.body.preheader.new ], [ [[ACC2:%[^ ]+]], %for.body ]
+; CHECK: [[PIN23:%[^ ]+]] = bitcast i16* %pIn2.3 to i32*
+; CHECK: [[IN23:%[^ ]+]] = load i32, i32* [[PIN23]], align 2
+; CHECK: [[PIN12:%[^ ]+]] = bitcast i16* %pIn1.2 to i32*
+; CHECK: [[IN12:%[^ ]+]] = load i32, i32* [[PIN12]], align 2
+; CHECK: [[ACC1:%[^ ]+]] = call i64 @llvm.arm.smlaldx(i32 [[IN23]], i32 [[IN12]], i64 [[ACC0]])
+; CHECK: [[PIN21:%[^ ]+]] = bitcast i16* %pIn2.1 to i32*
+; CHECK: [[IN21:%[^ ]+]] = load i32, i32* [[PIN21]], align 2
+; CHECK: [[PIN10:%[^ ]+]] = bitcast i16* %pIn1.0 to i32*
+; CHECK: [[IN10:%[^ ]+]] = load i32, i32* [[PIN10]], align 2
+; CHECK: [[ACC2]] = call i64 @llvm.arm.smlaldx(i32 [[IN21]], i32 [[IN10]], i64 [[ACC1]])
+; CHECK-NOT: call i64 @llvm.arm.smlald
+; CHECK-UNSUPPORTED-NOT: call i64 @llvm.arm.smlald
+
+entry:
+  %cmp9 = icmp eq i32 %limit, 0
+  br i1 %cmp9, label %for.cond.cleanup, label %for.body.preheader
+
+for.body.preheader:
+  %0 = add i32 %limit, -1
+  %xtraiter = and i32 %limit, 3
+  %1 = icmp ult i32 %0, 3
+  br i1 %1, label %for.cond.cleanup.loopexit.unr-lcssa, label %for.body.preheader.new
+
+for.body.preheader.new:
+  %unroll_iter = sub i32 %limit, %xtraiter
+  br label %for.body
+
+for.cond.cleanup.loopexit.unr-lcssa:
+  %add.lcssa.ph = phi i64 [ undef, %for.body.preheader ], [ %add.3, %for.body ]
+  %i.011.unr = phi i32 [ 0, %for.body.preheader ], [ %inc.3, %for.body ]
+  %sum.010.unr = phi i64 [ 0, %for.body.preheader ], [ %add.3, %for.body ]
+  %lcmp.mod = icmp eq i32 %xtraiter, 0
+  br i1 %lcmp.mod, label %for.cond.cleanup, label %for.body.epil
+
+for.body.epil:
+  %i.011.epil = phi i32 [ %inc.epil, %for.body.epil ], [ %i.011.unr, %for.cond.cleanup.loopexit.unr-lcssa ]
+  %sum.010.epil = phi i64 [ %add.epil, %for.body.epil ], [ %sum.010.unr, %for.cond.cleanup.loopexit.unr-lcssa ]
+  %epil.iter = phi i32 [ %epil.iter.sub, %for.body.epil ], [ %xtraiter, %for.cond.cleanup.loopexit.unr-lcssa ]
+  %sub.epil = sub i32 %j, %i.011.epil
+  %arrayidx.epil = getelementptr inbounds i16, i16* %pIn2, i32 %sub.epil
+  %2 = load i16, i16* %arrayidx.epil, align 2
+  %conv.epil = sext i16 %2 to i32
+  %arrayidx1.epil = getelementptr inbounds i16, i16* %pIn1, i32 %i.011.epil
+  %3 = load i16, i16* %arrayidx1.epil, align 2
+  %conv2.epil = sext i16 %3 to i32
+  %mul.epil = mul nsw i32 %conv2.epil, %conv.epil
+  %sext.mul.epil = sext i32 %mul.epil to i64
+  %add.epil = add nsw i64 %sext.mul.epil, %sum.010.epil
+  %inc.epil = add nuw i32 %i.011.epil, 1
+  %epil.iter.sub = add i32 %epil.iter, -1
+  %epil.iter.cmp = icmp eq i32 %epil.iter.sub, 0
+  br i1 %epil.iter.cmp, label %for.cond.cleanup, label %for.body.epil
+
+for.cond.cleanup:
+  %sum.0.lcssa = phi i64 [ 0, %entry ], [ %add.lcssa.ph, %for.cond.cleanup.loopexit.unr-lcssa ], [ %add.epil, %for.body.epil ]
+  ret i64 %sum.0.lcssa
+
+for.body:
+  %i.011 = phi i32 [ 0, %for.body.preheader.new ], [ %inc.3, %for.body ]
+  %sum.010 = phi i64 [ 0, %for.body.preheader.new ], [ %add.3, %for.body ]
+  %niter = phi i32 [ %unroll_iter, %for.body.preheader.new ], [ %niter.nsub.3, %for.body ]
+  %pIn2Base = phi i16* [ %pIn2, %for.body.preheader.new ], [ %pIn2.4, %for.body ]
+  %pIn2.0 = getelementptr inbounds i16, i16* %pIn2Base, i32 0
+  %In2 = load i16, i16* %pIn2.0, align 2
+  %pIn1.0 = getelementptr inbounds i16, i16* %pIn1, i32 %i.011
+  %In1 = load i16, i16* %pIn1.0, align 2
+  %inc = or i32 %i.011, 1
+  %pIn2.1 = getelementptr inbounds i16, i16* %pIn2Base, i32 -1
+  %In2.1 = load i16, i16* %pIn2.1, align 2
+  %pIn1.1 = getelementptr inbounds i16, i16* %pIn1, i32 %inc
+  %In1.1 = load i16, i16* %pIn1.1, align 2
+  %inc.1 = or i32 %i.011, 2
+  %pIn2.2 = getelementptr inbounds i16, i16* %pIn2Base, i32 -2
+  %In2.2 = load i16, i16* %pIn2.2, align 2
+  %pIn1.2 = getelementptr inbounds i16, i16* %pIn1, i32 %inc.1
+  %In1.2 = load i16, i16* %pIn1.2, align 2
+  %inc.2 = or i32 %i.011, 3
+  %pIn2.3 = getelementptr inbounds i16, i16* %pIn2Base, i32 -3
+  %In2.3 = load i16, i16* %pIn2.3, align 2
+  %pIn1.3 = getelementptr inbounds i16, i16* %pIn1, i32 %inc.2
+  %In1.3 = load i16, i16* %pIn1.3, align 2
+  %sextIn1 = sext i16 %In1 to i32
+  %sextIn1.1 = sext i16 %In1.1 to i32
+  %sextIn1.2 = sext i16 %In1.2 to i32
+  %sextIn1.3 = sext i16 %In1.3 to i32
+  %sextIn2 = sext i16 %In2 to i32
+  %sextIn2.1 = sext i16 %In2.1 to i32
+  %sextIn2.2 = sext i16 %In2.2 to i32
+  %sextIn2.3 = sext i16 %In2.3 to i32
+  %mul = mul nsw i32 %sextIn1, %sextIn2
+  %mul.1 = mul nsw i32 %sextIn1.1, %sextIn2.1
+  %mul.2 = mul nsw i32 %sextIn1.2, %sextIn2.2
+  %mul.3 = mul nsw i32 %sextIn1.3, %sextIn2.3
+  %sext.mul = sext i32 %mul to i64
+  %sext.mul.1 = sext i32 %mul.1 to i64
+  %sext.mul.2 = sext i32 %mul.2 to i64
+  %sext.mul.3 = sext i32 %mul.3 to i64
+  %add = add nsw i64 %sext.mul, %sum.010
+  %add.1 = add nsw i64 %sext.mul.1, %add
+  %add.2 = add nsw i64 %sext.mul.2, %add.1
+  %add.3 = add nsw i64 %sext.mul.3, %add.2
+  %inc.3 = add i32 %i.011, 4
+  %pIn2.4 = getelementptr inbounds i16, i16* %pIn2Base, i32 -4
+  %niter.nsub.3 = add i32 %niter, -4
+  %niter.ncmp.3 = icmp eq i32 %niter.nsub.3, 0
+  br i1 %niter.ncmp.3, label %for.cond.cleanup.loopexit.unr-lcssa, label %for.body
+}
+
+define i64 @smlaldx_swap(i16* nocapture readonly %pIn1, i16* nocapture readonly %pIn2, i32 %j, i32 %limit) {
+
+entry:
+  %cmp9 = icmp eq i32 %limit, 0
+  br i1 %cmp9, label %for.cond.cleanup, label %for.body.preheader
+
+for.body.preheader:
+  %0 = add i32 %limit, -1
+  %xtraiter = and i32 %limit, 3
+  %1 = icmp ult i32 %0, 3
+  br i1 %1, label %for.cond.cleanup.loopexit.unr-lcssa, label %for.body.preheader.new
+
+for.body.preheader.new:
+  %unroll_iter = sub i32 %limit, %xtraiter
+  %scevgep6 = getelementptr i16, i16* %pIn1, i32 2
+  %2 = add i32 %j, -1
+  %scevgep11 = getelementptr i16, i16* %pIn2, i32 %2
+  br label %for.body
+
+for.cond.cleanup.loopexit.unr-lcssa:
+  %add.lcssa.ph = phi i64 [ undef, %for.body.preheader ], [ %add.3, %for.body ]
+  %i.011.unr = phi i32 [ 0, %for.body.preheader ], [ %inc.3, %for.body ]
+  %sum.010.unr = phi i64 [ 0, %for.body.preheader ], [ %add.3, %for.body ]
+  %lcmp.mod = icmp eq i32 %xtraiter, 0
+  br i1 %lcmp.mod, label %for.cond.cleanup, label %for.body.epil.preheader
+
+for.body.epil.preheader:
+  %scevgep = getelementptr i16, i16* %pIn1, i32 %i.011.unr
+  %3 = sub i32 %j, %i.011.unr
+  %scevgep2 = getelementptr i16, i16* %pIn2, i32 %3
+  %4 = sub i32 0, %xtraiter
+  br label %for.body.epil
+
+for.body.epil:
+  %lsr.iv5 = phi i32 [ %4, %for.body.epil.preheader ], [ %lsr.iv.next, %for.body.epil ]
+  %lsr.iv3 = phi i16* [ %scevgep2, %for.body.epil.preheader ], [ %scevgep4, %for.body.epil ]
+  %lsr.iv = phi i16* [ %scevgep, %for.body.epil.preheader ], [ %scevgep1, %for.body.epil ]
+  %sum.010.epil = phi i64 [ %add.epil, %for.body.epil ], [ %sum.010.unr, %for.body.epil.preheader ]
+  %5 = load i16, i16* %lsr.iv3, align 2
+  %conv.epil = sext i16 %5 to i32
+  %6 = load i16, i16* %lsr.iv, align 2
+  %conv2.epil = sext i16 %6 to i32
+  %mul.epil = mul nsw i32 %conv2.epil, %conv.epil
+  %sext.mul.epil = sext i32 %mul.epil to i64
+  %add.epil = add nsw i64 %sext.mul.epil, %sum.010.epil
+  %scevgep1 = getelementptr i16, i16* %lsr.iv, i32 1
+  %scevgep4 = getelementptr i16, i16* %lsr.iv3, i32 -1
+  %lsr.iv.next = add nsw i32 %lsr.iv5, 1
+  %epil.iter.cmp = icmp eq i32 %lsr.iv.next, 0
+  br i1 %epil.iter.cmp, label %for.cond.cleanup, label %for.body.epil
+
+for.cond.cleanup:
+  %sum.0.lcssa = phi i64 [ 0, %entry ], [ %add.lcssa.ph, %for.cond.cleanup.loopexit.unr-lcssa ], [ %add.epil, %for.body.epil ]
+  ret i64 %sum.0.lcssa
+
+; CHECK-LABEL: smlaldx_swap
+; CHECK: for.body.preheader.new:
+; CHECK: [[PIN1Base:[^ ]+]] = getelementptr i16, i16* %pIn1
+; CHECK: [[PIN2Base:[^ ]+]] = getelementptr i16, i16* %pIn2
+
+; CHECK: for.body:
+; CHECK: [[PIN2:%[^ ]+]] = phi i16* [ [[PIN2_NEXT:%[^ ]+]], %for.body ], [ [[PIN2Base]], %for.body.preheader.new ]
+; CHECK: [[PIN1:%[^ ]+]] = phi i16* [ [[PIN1_NEXT:%[^ ]+]], %for.body ], [ [[PIN1Base]], %for.body.preheader.new ]
+; CHECK: [[IV:%[^ ]+]] = phi i32
+; CHECK: [[ACC0:%[^ ]+]] = phi i64 [ 0, %for.body.preheader.new ], [ [[ACC2:%[^ ]+]], %for.body ]
+
+; CHECK: [[PIN1_2:%[^ ]+]] = getelementptr i16, i16* [[PIN1]], i32 -2
+; CHECK: [[PIN2_2:%[^ ]+]] = getelementptr i16, i16* [[PIN2]], i32 -2
+
+; CHECK: [[PIN2_2_CAST:%[^ ]+]] = bitcast i16* [[PIN2_2]] to i32*
+; CHECK: [[IN2_2:%[^ ]+]] = load i32, i32* [[PIN2_2_CAST]], align 2
+; CHECK: [[PIN1_CAST:%[^ ]+]] = bitcast i16* [[PIN1]] to i32*
+; CHECK: [[IN1:%[^ ]+]] = load i32, i32* [[PIN1_CAST]], align 2
+; CHECK: [[ACC1:%[^ ]+]] = call i64 @llvm.arm.smlaldx(i32 [[IN2_2]], i32 [[IN1]], i64 [[ACC0]])
+
+; CHECK: [[PIN2_CAST:%[^ ]+]] = bitcast i16* [[PIN2]] to i32*
+; CHECK: [[IN2:%[^ ]+]] = load i32, i32* [[PIN2_CAST]], align 2
+; CHECK: [[PIN1_2_CAST:%[^ ]+]] = bitcast i16* [[PIN1_2]] to i32*
+; CHECK: [[IN1_2:%[^ ]+]] = load i32, i32* [[PIN1_2_CAST]], align 2
+; CHECK: [[ACC2]] = call i64 @llvm.arm.smlaldx(i32 [[IN2]], i32 [[IN1_2]], i64 [[ACC1]])
+
+; CHECK: [[PIN1_NEXT]] = getelementptr i16, i16* [[PIN1]], i32 4
+; CHECK: [[PIN2_NEXT]] = getelementptr i16, i16* [[PIN2]], i32 -4
+
+; CHECK-NOT: call i64 @llvm.arm.smlald
+; CHECK-UNSUPPORTED-NOT: call i64 @llvm.arm.smlald
+
+for.body:
+  %pin2 = phi i16* [ %pin2.sub4, %for.body ], [ %scevgep11, %for.body.preheader.new ]
+  %pin1 = phi i16* [ %pin1.add4, %for.body ], [ %scevgep6, %for.body.preheader.new ]
+  %i.011 = phi i32 [ 0, %for.body.preheader.new ], [ %inc.3, %for.body ]
+  %sum.010 = phi i64 [ 0, %for.body.preheader.new ], [ %add.3, %for.body ]
+  %pin2.add1 = getelementptr i16, i16* %pin2, i32 1
+  %In2 = load i16, i16* %pin2.add1, align 2
+  %pin1.sub2 = getelementptr i16, i16* %pin1, i32 -2
+  %In1 = load i16, i16* %pin1.sub2, align 2
+  %In2.1 = load i16, i16* %pin2, align 2
+  %pin1.sub1 = getelementptr i16, i16* %pin1, i32 -1
+  %In1.1 = load i16, i16* %pin1.sub1, align 2
+  %pin2.sub1 = getelementptr i16, i16* %pin2, i32 -1
+  %In2.2 = load i16, i16* %pin2.sub1, align 2
+  %In1.2 = load i16, i16* %pin1, align 2
+  %pin2.sub2 = getelementptr i16, i16* %pin2, i32 -2
+  %In2.3 = load i16, i16* %pin2.sub2, align 2
+  %pin1.add1 = getelementptr i16, i16* %pin1, i32 1
+  %In1.3 = load i16, i16* %pin1.add1, align 2
+  %sextIn2 = sext i16 %In2 to i32
+  %sextIn1 = sext i16 %In1 to i32
+  %sextIn2.1 = sext i16 %In2.1 to i32
+  %sextIn1.1 = sext i16 %In1.1 to i32
+  %sextIn2.2 = sext i16 %In2.2 to i32
+  %sextIn1.2 = sext i16 %In1.2 to i32
+  %sextIn2.3 = sext i16 %In2.3 to i32
+  %sextIn1.3 = sext i16 %In1.3 to i32
+  %mul = mul nsw i32 %sextIn2, %sextIn1
+  %sext.mul = sext i32 %mul to i64
+  %add = add nsw i64 %sext.mul, %sum.010
+  %mul.1 = mul nsw i32 %sextIn2.1, %sextIn1.1
+  %sext.mul.1 = sext i32 %mul.1 to i64
+  %add.1 = add nsw i64 %sext.mul.1, %add
+  %mul.2 = mul nsw i32 %sextIn2.2, %sextIn1.2
+  %sext.mul.2 = sext i32 %mul.2 to i64
+  %add.2 = add nsw i64 %sext.mul.2, %add.1
+  %mul.3 = mul nsw i32 %sextIn2.3, %sextIn1.3
+  %sext.mul.3 = sext i32 %mul.3 to i64
+  %add.3 = add nsw i64 %sext.mul.3, %add.2
+  %inc.3 = add i32 %i.011, 4
+  %pin1.add4 = getelementptr i16, i16* %pin1, i32 4
+  %pin2.sub4 = getelementptr i16, i16* %pin2, i32 -4
+  %niter.ncmp.3 = icmp eq i32 %unroll_iter, %inc.3
+  br i1 %niter.ncmp.3, label %for.cond.cleanup.loopexit.unr-lcssa, label %for.body
+}
diff --git a/llvm/test/CodeGen/ARM/smlaldx-2.ll b/llvm/test/CodeGen/ARM/smlaldx-2.ll
new file mode 100644
index 0000000..a4b5a27
--- /dev/null
+++ b/llvm/test/CodeGen/ARM/smlaldx-2.ll
@@ -0,0 +1,248 @@
+; RUN: opt -mtriple=thumbv8m.main -mcpu=cortex-m33 -arm-parallel-dsp %s -S -o - | FileCheck %s
+; RUN: opt -mtriple=arm-arm-eabi -mcpu=cortex-m0 < %s -arm-parallel-dsp -S | FileCheck %s --check-prefix=CHECK-UNSUPPORTED
+; RUN: opt -mtriple=arm-arm-eabi -mcpu=cortex-m33 -mattr=-dsp < %s -arm-parallel-dsp -S | FileCheck %s --check-prefix=CHECK-UNSUPPORTED
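+
+; The same pattern as smlaldx-1.ll, but with the accumulating adds written
+; with their operands in varying orders, checking that the matching handles
+; the commutativity of the adds.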
+
+define i64 @smlaldx(i16* nocapture readonly %pIn1, i16* nocapture readonly %pIn2, i32 %j, i32 %limit) {
+
+; CHECK-LABEL: smlaldx
+; CHECK: = phi i32 [ 0, %for.body.preheader.new ],
+; CHECK: [[ACC0:%[^ ]+]] = phi i64 [ 0, %for.body.preheader.new ], [ [[ACC2:%[^ ]+]], %for.body ]
+; CHECK: [[PIN23:%[^ ]+]] = bitcast i16* %pIn2.3 to i32*
+; CHECK: [[IN23:%[^ ]+]] = load i32, i32* [[PIN23]], align 2
+; CHECK: [[PIN12:%[^ ]+]] = bitcast i16* %pIn1.2 to i32*
+; CHECK: [[IN12:%[^ ]+]] = load i32, i32* [[PIN12]], align 2
+; CHECK: [[ACC1:%[^ ]+]] = call i64 @llvm.arm.smlaldx(i32 [[IN23]], i32 [[IN12]], i64 [[ACC0]])
+; CHECK: [[PIN21:%[^ ]+]] = bitcast i16* %pIn2.1 to i32*
+; CHECK: [[IN21:%[^ ]+]] = load i32, i32* [[PIN21]], align 2
+; CHECK: [[PIN10:%[^ ]+]] = bitcast i16* %pIn1.0 to i32*
+; CHECK: [[IN10:%[^ ]+]] = load i32, i32* [[PIN10]], align 2
+; CHECK: [[ACC2]] = call i64 @llvm.arm.smlaldx(i32 [[IN21]], i32 [[IN10]], i64 [[ACC1]])
+; CHECK-NOT: call i64 @llvm.arm.smlald
+; CHECK-UNSUPPORTED-NOT: call i64 @llvm.arm.smlald
+
+entry:
+  %cmp9 = icmp eq i32 %limit, 0
+  br i1 %cmp9, label %for.cond.cleanup, label %for.body.preheader
+
+for.body.preheader:
+  %0 = add i32 %limit, -1
+  %xtraiter = and i32 %limit, 3
+  %1 = icmp ult i32 %0, 3
+  br i1 %1, label %for.cond.cleanup.loopexit.unr-lcssa, label %for.body.preheader.new
+
+for.body.preheader.new:
+  %unroll_iter = sub i32 %limit, %xtraiter
+  br label %for.body
+
+for.cond.cleanup.loopexit.unr-lcssa:
+  %add.lcssa.ph = phi i64 [ undef, %for.body.preheader ], [ %add.3, %for.body ]
+  %i.011.unr = phi i32 [ 0, %for.body.preheader ], [ %inc.3, %for.body ]
+  %sum.010.unr = phi i64 [ 0, %for.body.preheader ], [ %add.3, %for.body ]
+  %lcmp.mod = icmp eq i32 %xtraiter, 0
+  br i1 %lcmp.mod, label %for.cond.cleanup, label %for.body.epil
+
+for.body.epil:
+  %i.011.epil = phi i32 [ %inc.epil, %for.body.epil ], [ %i.011.unr, %for.cond.cleanup.loopexit.unr-lcssa ]
+  %sum.010.epil = phi i64 [ %add.epil, %for.body.epil ], [ %sum.010.unr, %for.cond.cleanup.loopexit.unr-lcssa ]
+  %epil.iter = phi i32 [ %epil.iter.sub, %for.body.epil ], [ %xtraiter, %for.cond.cleanup.loopexit.unr-lcssa ]
+  %sub.epil = sub i32 %j, %i.011.epil
+  %arrayidx.epil = getelementptr inbounds i16, i16* %pIn2, i32 %sub.epil
+  %2 = load i16, i16* %arrayidx.epil, align 2
+  %conv.epil = sext i16 %2 to i32
+  %arrayidx1.epil = getelementptr inbounds i16, i16* %pIn1, i32 %i.011.epil
+  %3 = load i16, i16* %arrayidx1.epil, align 2
+  %conv2.epil = sext i16 %3 to i32
+  %mul.epil = mul nsw i32 %conv2.epil, %conv.epil
+  %sext.mul.epil = sext i32 %mul.epil to i64
+  %add.epil = add nsw i64 %sext.mul.epil, %sum.010.epil
+  %inc.epil = add nuw i32 %i.011.epil, 1
+  %epil.iter.sub = add i32 %epil.iter, -1
+  %epil.iter.cmp = icmp eq i32 %epil.iter.sub, 0
+  br i1 %epil.iter.cmp, label %for.cond.cleanup, label %for.body.epil
+
+for.cond.cleanup:
+  %sum.0.lcssa = phi i64 [ 0, %entry ], [ %add.lcssa.ph, %for.cond.cleanup.loopexit.unr-lcssa ], [ %add.epil, %for.body.epil ]
+  ret i64 %sum.0.lcssa
+
+for.body:
+  %i.011 = phi i32 [ 0, %for.body.preheader.new ], [ %inc.3, %for.body ]
+  %sum.010 = phi i64 [ 0, %for.body.preheader.new ], [ %add.3, %for.body ]
+  %niter = phi i32 [ %unroll_iter, %for.body.preheader.new ], [ %niter.nsub.3, %for.body ]
+  %pIn2Base = phi i16* [ %pIn2, %for.body.preheader.new ], [ %pIn2.4, %for.body ]
+  %pIn2.0 = getelementptr inbounds i16, i16* %pIn2Base, i32 0
+  %In2 = load i16, i16* %pIn2.0, align 2
+  %pIn1.0 = getelementptr inbounds i16, i16* %pIn1, i32 %i.011
+  %In1 = load i16, i16* %pIn1.0, align 2
+  %inc = or i32 %i.011, 1
+  %pIn2.1 = getelementptr inbounds i16, i16* %pIn2Base, i32 -1
+  %In2.1 = load i16, i16* %pIn2.1, align 2
+  %pIn1.1 = getelementptr inbounds i16, i16* %pIn1, i32 %inc
+  %In1.1 = load i16, i16* %pIn1.1, align 2
+  %inc.1 = or i32 %i.011, 2
+  %pIn2.2 = getelementptr inbounds i16, i16* %pIn2Base, i32 -2
+  %In2.2 = load i16, i16* %pIn2.2, align 2
+  %pIn1.2 = getelementptr inbounds i16, i16* %pIn1, i32 %inc.1
+  %In1.2 = load i16, i16* %pIn1.2, align 2
+  %inc.2 = or i32 %i.011, 3
+  %pIn2.3 = getelementptr inbounds i16, i16* %pIn2Base, i32 -3
+  %In2.3 = load i16, i16* %pIn2.3, align 2
+  %pIn1.3 = getelementptr inbounds i16, i16* %pIn1, i32 %inc.2
+  %In1.3 = load i16, i16* %pIn1.3, align 2
+  %sextIn1 = sext i16 %In1 to i32
+  %sextIn1.1 = sext i16 %In1.1 to i32
+  %sextIn1.2 = sext i16 %In1.2 to i32
+  %sextIn1.3 = sext i16 %In1.3 to i32
+  %sextIn2 = sext i16 %In2 to i32
+  %sextIn2.1 = sext i16 %In2.1 to i32
+  %sextIn2.2 = sext i16 %In2.2 to i32
+  %sextIn2.3 = sext i16 %In2.3 to i32
+  %mul = mul nsw i32 %sextIn1, %sextIn2
+  %mul.1 = mul nsw i32 %sextIn1.1, %sextIn2.1
+  %mul.2 = mul nsw i32 %sextIn1.2, %sextIn2.2
+  %mul.3 = mul nsw i32 %sextIn1.3, %sextIn2.3
+  %sext.mul = sext i32 %mul to i64
+  %sext.mul.1 = sext i32 %mul.1 to i64
+  %sext.mul.2 = sext i32 %mul.2 to i64
+  %sext.mul.3 = sext i32 %mul.3 to i64
+  %add = add nsw i64 %sum.010, %sext.mul
+  %add.1 = add nsw i64 %sext.mul.1, %add
+  %add.2 = add nsw i64 %add.1, %sext.mul.2
+  %add.3 = add nsw i64 %sext.mul.3, %add.2
+  %inc.3 = add i32 %i.011, 4
+  %pIn2.4 = getelementptr inbounds i16, i16* %pIn2Base, i32 -4
+  %niter.nsub.3 = add i32 %niter, -4
+  %niter.ncmp.3 = icmp eq i32 %niter.nsub.3, 0
+  br i1 %niter.ncmp.3, label %for.cond.cleanup.loopexit.unr-lcssa, label %for.body
+}
+
+define i64 @smlaldx_swap(i16* nocapture readonly %pIn1, i16* nocapture readonly %pIn2, i32 %j, i32 %limit) {
+
+entry:
+  %cmp9 = icmp eq i32 %limit, 0
+  br i1 %cmp9, label %for.cond.cleanup, label %for.body.preheader
+
+for.body.preheader:
+  %0 = add i32 %limit, -1
+  %xtraiter = and i32 %limit, 3
+  %1 = icmp ult i32 %0, 3
+  br i1 %1, label %for.cond.cleanup.loopexit.unr-lcssa, label %for.body.preheader.new
+
+for.body.preheader.new:
+  %unroll_iter = sub i32 %limit, %xtraiter
+  %scevgep6 = getelementptr i16, i16* %pIn1, i32 2
+  %2 = add i32 %j, -1
+  %scevgep11 = getelementptr i16, i16* %pIn2, i32 %2
+  br label %for.body
+
+for.cond.cleanup.loopexit.unr-lcssa:
+  %add.lcssa.ph = phi i64 [ undef, %for.body.preheader ], [ %add.3, %for.body ]
+  %i.011.unr = phi i32 [ 0, %for.body.preheader ], [ %inc.3, %for.body ]
+  %sum.010.unr = phi i64 [ 0, %for.body.preheader ], [ %add.3, %for.body ]
+  %lcmp.mod = icmp eq i32 %xtraiter, 0
+  br i1 %lcmp.mod, label %for.cond.cleanup, label %for.body.epil.preheader
+
+for.body.epil.preheader:
+  %scevgep = getelementptr i16, i16* %pIn1, i32 %i.011.unr
+  %3 = sub i32 %j, %i.011.unr
+  %scevgep2 = getelementptr i16, i16* %pIn2, i32 %3
+  %4 = sub i32 0, %xtraiter
+  br label %for.body.epil
+
+for.body.epil:
+  %lsr.iv5 = phi i32 [ %4, %for.body.epil.preheader ], [ %lsr.iv.next, %for.body.epil ]
+  %lsr.iv3 = phi i16* [ %scevgep2, %for.body.epil.preheader ], [ %scevgep4, %for.body.epil ]
+  %lsr.iv = phi i16* [ %scevgep, %for.body.epil.preheader ], [ %scevgep1, %for.body.epil ]
+  %sum.010.epil = phi i64 [ %add.epil, %for.body.epil ], [ %sum.010.unr, %for.body.epil.preheader ]
+  %5 = load i16, i16* %lsr.iv3, align 2
+  %conv.epil = sext i16 %5 to i32
+  %6 = load i16, i16* %lsr.iv, align 2
+  %conv2.epil = sext i16 %6 to i32
+  %mul.epil = mul nsw i32 %conv2.epil, %conv.epil
+  %sext.mul.epil = sext i32 %mul.epil to i64
+  %add.epil = add nsw i64 %sext.mul.epil, %sum.010.epil
+  %scevgep1 = getelementptr i16, i16* %lsr.iv, i32 1
+  %scevgep4 = getelementptr i16, i16* %lsr.iv3, i32 -1
+  %lsr.iv.next = add nsw i32 %lsr.iv5, 1
+  %epil.iter.cmp = icmp eq i32 %lsr.iv.next, 0
+  br i1 %epil.iter.cmp, label %for.cond.cleanup, label %for.body.epil
+
+for.cond.cleanup:
+  %sum.0.lcssa = phi i64 [ 0, %entry ], [ %add.lcssa.ph, %for.cond.cleanup.loopexit.unr-lcssa ], [ %add.epil, %for.body.epil ]
+  ret i64 %sum.0.lcssa
+
+; CHECK-LABEL: smlaldx_swap
+; CHECK: for.body.preheader.new:
+; CHECK: [[PIN1Base:[^ ]+]] = getelementptr i16, i16* %pIn1
+; CHECK: [[PIN2Base:[^ ]+]] = getelementptr i16, i16* %pIn2
+
+; CHECK: for.body:
+; CHECK: [[PIN2:%[^ ]+]] = phi i16* [ [[PIN2_NEXT:%[^ ]+]], %for.body ], [ [[PIN2Base]], %for.body.preheader.new ]
+; CHECK: [[PIN1:%[^ ]+]] = phi i16* [ [[PIN1_NEXT:%[^ ]+]], %for.body ], [ [[PIN1Base]], %for.body.preheader.new ]
+; CHECK: [[IV:%[^ ]+]] = phi i32
+; CHECK: [[ACC0:%[^ ]+]] = phi i64 [ 0, %for.body.preheader.new ], [ [[ACC2:%[^ ]+]], %for.body ]
+; CHECK: [[PIN1_2:%[^ ]+]] = getelementptr i16, i16* [[PIN1]], i32 -2
+; CHECK: [[PIN2_2:%[^ ]+]] = getelementptr i16, i16* [[PIN2]], i32 -2
+
+; CHECK: [[PIN2_CAST:%[^ ]+]] = bitcast i16* [[PIN2]] to i32*
+; CHECK: [[IN2:%[^ ]+]] = load i32, i32* [[PIN2_CAST]], align 2
+; CHECK: [[PIN1_2_CAST:%[^ ]+]] = bitcast i16* [[PIN1_2]] to i32*
+; CHECK: [[IN1_2:%[^ ]+]] = load i32, i32* [[PIN1_2_CAST]], align 2
+; CHECK: [[ACC1:%[^ ]+]] = call i64 @llvm.arm.smlaldx(i32 [[IN2]], i32 [[IN1_2]], i64 [[ACC0]])
+
+; CHECK: [[PIN1_CAST:%[^ ]+]] = bitcast i16* [[PIN1]] to i32*
+; CHECK: [[IN1:%[^ ]+]] = load i32, i32* [[PIN1_CAST]], align 2
+; CHECK: [[PIN2_2_CAST:%[^ ]+]] = bitcast i16* [[PIN2_2]] to i32*
+; CHECK: [[IN2_2:%[^ ]+]] = load i32, i32* [[PIN2_2_CAST]], align 2
+; CHECK: [[ACC2]] = call i64 @llvm.arm.smlaldx(i32 [[IN1]], i32 [[IN2_2]], i64 [[ACC1]])
+
+; CHECK: [[PIN1_NEXT]] = getelementptr i16, i16* [[PIN1]], i32 4
+; CHECK: [[PIN2_NEXT]] = getelementptr i16, i16* [[PIN2]], i32 -4
+
+; CHECK-NOT: call i64 @llvm.arm.smlald
+; CHECK-UNSUPPORTED-NOT: call i64 @llvm.arm.smlald
+
+for.body:
+  %pin2 = phi i16* [ %pin2.sub4, %for.body ], [ %scevgep11, %for.body.preheader.new ]
+  %pin1 = phi i16* [ %pin1.add4, %for.body ], [ %scevgep6, %for.body.preheader.new ]
+  %i.011 = phi i32 [ 0, %for.body.preheader.new ], [ %inc.3, %for.body ]
+  %sum.010 = phi i64 [ 0, %for.body.preheader.new ], [ %add.3, %for.body ]
+  %pin2.add1 = getelementptr i16, i16* %pin2, i32 1
+  %In2 = load i16, i16* %pin2.add1, align 2
+  %pin1.sub2 = getelementptr i16, i16* %pin1, i32 -2
+  %In1 = load i16, i16* %pin1.sub2, align 2
+  %In2.1 = load i16, i16* %pin2, align 2
+  %pin1.sub1 = getelementptr i16, i16* %pin1, i32 -1
+  %In1.1 = load i16, i16* %pin1.sub1, align 2
+  %pin2.sub1 = getelementptr i16, i16* %pin2, i32 -1
+  %In2.2 = load i16, i16* %pin2.sub1, align 2
+  %In1.2 = load i16, i16* %pin1, align 2
+  %pin2.sub2 = getelementptr i16, i16* %pin2, i32 -2
+  %In2.3 = load i16, i16* %pin2.sub2, align 2
+  %pin1.add1 = getelementptr i16, i16* %pin1, i32 1
+  %In1.3 = load i16, i16* %pin1.add1, align 2
+  %sextIn2 = sext i16 %In2 to i32
+  %sextIn1 = sext i16 %In1 to i32
+  %sextIn2.1 = sext i16 %In2.1 to i32
+  %sextIn1.1 = sext i16 %In1.1 to i32
+  %sextIn2.2 = sext i16 %In2.2 to i32
+  %sextIn1.2 = sext i16 %In1.2 to i32
+  %sextIn2.3 = sext i16 %In2.3 to i32
+  %sextIn1.3 = sext i16 %In1.3 to i32
+  %mul = mul nsw i32 %sextIn2, %sextIn1
+  %sext.mul = sext i32 %mul to i64
+  %add = add nsw i64 %sext.mul, %sum.010
+  %mul.1 = mul nsw i32 %sextIn2.1, %sextIn1.1
+  %sext.mul.1 = sext i32 %mul.1 to i64
+  %add.1 = add nsw i64 %sext.mul.1, %add
+  %mul.2 = mul nsw i32 %sextIn2.2, %sextIn1.2
+  %sext.mul.2 = sext i32 %mul.2 to i64
+  %add.2 = add nsw i64 %add.1, %sext.mul.2
+  %mul.3 = mul nsw i32 %sextIn2.3, %sextIn1.3
+  %sext.mul.3 = sext i32 %mul.3 to i64
+  %add.3 = add nsw i64 %add.2, %sext.mul.3
+  %inc.3 = add i32 %i.011, 4
+  %pin1.add4 = getelementptr i16, i16* %pin1, i32 4
+  %pin2.sub4 = getelementptr i16, i16* %pin2, i32 -4
+  %niter.ncmp.3 = icmp eq i32 %unroll_iter, %inc.3
+  br i1 %niter.ncmp.3, label %for.cond.cleanup.loopexit.unr-lcssa, label %for.body
+}