* add the shladd (shift left and add) instruction
* fold left shifts of 1, 2, 3 or 4 bits into adds — these are exactly
  the shift counts that shladd supports

  This doesn't save much now, but should get a serious workout once
  multiplies by constants get converted to shift/add/sub sequences.
  Hold on! :)


git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@21282 91177308-0d34-0410-b5e6-96231b3b80d8
diff --git a/lib/Target/IA64/IA64ISelPattern.cpp b/lib/Target/IA64/IA64ISelPattern.cpp
index c501fa1..7ca220b 100644
--- a/lib/Target/IA64/IA64ISelPattern.cpp
+++ b/lib/Target/IA64/IA64ISelPattern.cpp
@@ -873,6 +873,26 @@
       BuildMI(BB, IA64::FMA, 3, Result).addReg(Tmp1).addReg(Tmp2).addReg(Tmp3);
       return Result; // early exit
     }
+
+    if(DestType != MVT::f64 && N.getOperand(0).getOpcode() == ISD::SHL &&
+	N.getOperand(0).Val->hasOneUse()) { // if we might be able to fold
+                                            // this add into a shladd, try:
+      ConstantSDNode *CSD = NULL;
+      if((CSD = dyn_cast<ConstantSDNode>(N.getOperand(0).getOperand(1))) &&
+	  (CSD->getValue() >= 1) && (CSD->getValue() <= 4) ) { // we can:
+
+	// ++FusedSHLADD; // Statistic
+	Tmp1 = SelectExpr(N.getOperand(0).getOperand(0));
+	int shl_amt = CSD->getValue();
+	Tmp3 = SelectExpr(N.getOperand(1));
+	
+	BuildMI(BB, IA64::SHLADD, 3, Result)
+	  .addReg(Tmp1).addImm(shl_amt).addReg(Tmp3);
+	return Result; // early exit
+      }
+    }
+
+    //else, fallthrough:
     Tmp1 = SelectExpr(N.getOperand(0));
     if(DestType != MVT::f64) { // integer addition:
         switch (ponderIntegerAdditionWith(N.getOperand(1), Tmp3)) {