Enable i16 to i32 promotion by default.
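
With this change X86TargetLowering::isTypeDesirableForOp and
IsDesirableToPromoteOp no longer consult Subtarget->shouldPromote16Bit():
i16 operations are promoted to i32 unconditionally, and the hidden
-promote-16bit option, the Promote16Bit subtarget field, and the
Promote16Bit / NotPromote16Bit predicates are removed. The
(i32 (anyext GR16)) pattern now always uses INSERT_SUBREG, new
(srl (and_su GR32:$src, 0xff00), (i8 8)) patterns select the high 8-bit
subregister of the promoted i32 value, and X86ISelDAGToDAG looks through
a single-use truncate when shrinking (X86cmp (and x, imm), 0).

As a rough illustration (a hypothetical example, not taken from this
patch), i16 arithmetic such as the following is now a candidate for
promotion, so it can be selected with i32 instructions whose encodings
are smaller than the i16 ones:

    ; Hypothetical IR: both i16 operations are candidates for promotion
    ; to i32 during DAG combining.
    define i16 @add_shifted(i16 %a, i16 %b) nounwind {
      %s = shl i16 %a, 3
      %r = add i16 %s, %b
      ret i16 %r
    }
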
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@102493 91177308-0d34-0410-b5e6-96231b3b80d8
diff --git a/lib/Target/X86/X86ISelDAGToDAG.cpp b/lib/Target/X86/X86ISelDAGToDAG.cpp
index d6744d1..1c42639 100644
--- a/lib/Target/X86/X86ISelDAGToDAG.cpp
+++ b/lib/Target/X86/X86ISelDAGToDAG.cpp
@@ -1854,6 +1854,9 @@
// Look for (X86cmp (and $op, $imm), 0) and see if we can convert it to
// use a smaller encoding.
+ if (N0.getOpcode() == ISD::TRUNCATE && N0.hasOneUse())
+ // Look past the truncate if CMP is its only use.
+ N0 = N0.getOperand(0);
if (N0.getNode()->getOpcode() == ISD::AND && N0.getNode()->hasOneUse() &&
N0.getValueType() != MVT::i8 &&
X86::isZeroNode(N1)) {
diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp
index 3d87ffb..4c0c5f6 100644
--- a/lib/Target/X86/X86ISelLowering.cpp
+++ b/lib/Target/X86/X86ISelLowering.cpp
@@ -6075,7 +6075,7 @@
// the encoding for the i16 version is larger than the i32 version.
// Also promote i16 to i32 for performance / code size reason.
if (LHS.getValueType() == MVT::i8 ||
- (Subtarget->shouldPromote16Bit() && LHS.getValueType() == MVT::i16))
+ LHS.getValueType() == MVT::i16)
LHS = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, LHS);
// If the operand types disagree, extend the shift amount to match. Since
@@ -9949,7 +9949,7 @@
bool X86TargetLowering::isTypeDesirableForOp(unsigned Opc, EVT VT) const {
if (!isTypeLegal(VT))
return false;
- if (!Subtarget->shouldPromote16Bit() || VT != MVT::i16)
+ if (VT != MVT::i16)
return true;
switch (Opc) {
@@ -9983,9 +9983,6 @@
/// beneficial for dag combiner to promote the specified node. If true, it
/// should return the desired promotion type by reference.
bool X86TargetLowering::IsDesirableToPromoteOp(SDValue Op, EVT &PVT) const {
- if (!Subtarget->shouldPromote16Bit())
- return false;
-
EVT VT = Op.getValueType();
if (VT != MVT::i16)
return false;
@@ -9998,10 +9995,16 @@
LoadSDNode *LD = cast<LoadSDNode>(Op);
// If the non-extending load has a single use and it's not live out, then it
// might be folded.
- if (LD->getExtensionType() == ISD::NON_EXTLOAD &&
- Op.hasOneUse() &&
- Op.getNode()->use_begin()->getOpcode() != ISD::CopyToReg)
- return false;
+ if (LD->getExtensionType() == ISD::NON_EXTLOAD /*&&
+ Op.hasOneUse()*/) {
+ for (SDNode::use_iterator UI = Op.getNode()->use_begin(),
+ UE = Op.getNode()->use_end(); UI != UE; ++UI) {
+ // The only case where we'd want to promote LOAD (rather than it being
+ // promoted as an operand) is when its only use is live-out.
+ if (UI->getOpcode() != ISD::CopyToReg)
+ return false;
+ }
+ }
Promote = true;
break;
}
@@ -10011,8 +10014,7 @@
Promote = true;
break;
case ISD::SHL:
- case ISD::SRL:
- {
+ case ISD::SRL: {
SDValue N0 = Op.getOperand(0);
// Look out for (store (shl (load), x)).
if (MayFoldLoad(N0) && MayFoldIntoStore(Op))
diff --git a/lib/Target/X86/X86Instr64bit.td b/lib/Target/X86/X86Instr64bit.td
index 7b1cdbc..0b5ea3f 100644
--- a/lib/Target/X86/X86Instr64bit.td
+++ b/lib/Target/X86/X86Instr64bit.td
@@ -2086,6 +2086,11 @@
(EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src, GR32_ABCD)),
x86_subreg_8bit_hi))>,
Requires<[In64BitMode]>;
+def : Pat<(srl (and_su GR32:$src, 0xff00), (i8 8)),
+ (MOVZX32_NOREXrr8 (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src,
+ GR32_ABCD)),
+ x86_subreg_8bit_hi))>,
+ Requires<[In64BitMode]>;
def : Pat<(srl GR16:$src, (i8 8)),
(EXTRACT_SUBREG
(MOVZX32_NOREXrr8
diff --git a/lib/Target/X86/X86InstrInfo.td b/lib/Target/X86/X86InstrInfo.td
index 751bbb4..460643a 100644
--- a/lib/Target/X86/X86InstrInfo.td
+++ b/lib/Target/X86/X86InstrInfo.td
@@ -331,8 +331,6 @@
def FastBTMem : Predicate<"!Subtarget->isBTMemSlow()">;
def CallImmAddr : Predicate<"Subtarget->IsLegalToCallImmediateAddr(TM)">;
def HasAES : Predicate<"Subtarget->hasAES()">;
-def Promote16Bit : Predicate<"Subtarget->shouldPromote16Bit()">;
-def NotPromote16Bit : Predicate<"!Subtarget->shouldPromote16Bit()">;
//===----------------------------------------------------------------------===//
// X86 Instruction Format Definitions.
@@ -4450,12 +4448,10 @@
// avoid partial-register updates.
def : Pat<(i16 (anyext GR8 :$src)), (MOVZX16rr8 GR8 :$src)>;
def : Pat<(i32 (anyext GR8 :$src)), (MOVZX32rr8 GR8 :$src)>;
-def : Pat<(i32 (anyext GR16:$src)), (MOVZX32rr16 GR16:$src)>,
- Requires<[NotPromote16Bit]>;
+// Except for i16 -> i32, since isel expects i16 ops to be promoted to i32.
def : Pat<(i32 (anyext GR16:$src)),
- (INSERT_SUBREG (i32 (IMPLICIT_DEF)), GR16:$src, x86_subreg_16bit)>,
- Requires<[Promote16Bit]>;
+ (INSERT_SUBREG (i32 (IMPLICIT_DEF)), GR16:$src, x86_subreg_16bit)>;
//===----------------------------------------------------------------------===//
@@ -4546,6 +4542,11 @@
GR32_ABCD)),
x86_subreg_8bit_hi))>,
Requires<[In32BitMode]>;
+def : Pat<(srl (and_su GR32:$src, 0xff00), (i8 8)),
+ (MOVZX32rr8 (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src,
+ GR32_ABCD)),
+ x86_subreg_8bit_hi))>,
+ Requires<[In32BitMode]>;
// (shl x, 1) ==> (add x, x)
def : Pat<(shl GR8 :$src1, (i8 1)), (ADD8rr GR8 :$src1, GR8 :$src1)>;
diff --git a/lib/Target/X86/X86Subtarget.cpp b/lib/Target/X86/X86Subtarget.cpp
index 7ac7b76..09a2685 100644
--- a/lib/Target/X86/X86Subtarget.cpp
+++ b/lib/Target/X86/X86Subtarget.cpp
@@ -16,7 +16,6 @@
#include "X86InstrInfo.h"
#include "X86GenSubtarget.inc"
#include "llvm/GlobalValue.h"
-#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/System/Host.h"
@@ -25,10 +24,6 @@
#include "llvm/ADT/SmallVector.h"
using namespace llvm;
-static cl::opt<bool>
-DoPromote16Bit("promote-16bit", cl::Hidden,
- cl::desc("Promote 16-bit instructions"));
-
#if defined(_MSC_VER)
#include <intrin.h>
#endif
@@ -298,7 +293,6 @@
, IsBTMemSlow(false)
, IsUAMemFast(false)
, HasVectorUAMem(false)
- , Promote16Bit(DoPromote16Bit)
, DarwinVers(0)
, stackAlignment(8)
// FIXME: this is a known good value for Yonah. How about others?
diff --git a/lib/Target/X86/X86Subtarget.h b/lib/Target/X86/X86Subtarget.h
index dc99018..646af91 100644
--- a/lib/Target/X86/X86Subtarget.h
+++ b/lib/Target/X86/X86Subtarget.h
@@ -88,10 +88,6 @@
/// operands. This may require setting a feature bit in the processor.
bool HasVectorUAMem;
- /// Promote16Bit - True if codegen should promote 16-bit operations to 32-bit.
- /// This is a temporary option.
- bool Promote16Bit;
-
/// DarwinVers - Nonzero if this is a darwin platform: the numeric
/// version of the platform, e.g. 8 = 10.4 (Tiger), 9 = 10.5 (Leopard), etc.
unsigned char DarwinVers; // Is any darwin-x86 platform.
@@ -160,7 +156,6 @@
bool isBTMemSlow() const { return IsBTMemSlow; }
bool isUnalignedMemAccessFast() const { return IsUAMemFast; }
bool hasVectorUAMem() const { return HasVectorUAMem; }
- bool shouldPromote16Bit() const { return Promote16Bit; }
bool isTargetDarwin() const { return TargetType == isDarwin; }
bool isTargetELF() const { return TargetType == isELF; }