Add sub/mul overflow intrinsics. There is currently no target-independent
way of determining overflow on multiplication; it's very tricky. Patch by
Zoltan Varga!
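For reference, a minimal sketch of what the new lowering produces for the
multiply case (this assumes the llvm.*.with.overflow intrinsic naming; the
assembly is illustrative x86-64 SysV output, not verbatim codegen):

  declare { i32, i1 } @llvm.smul.with.overflow.i32(i32, i32)

  define i1 @mul_overflows(i32 %a, i32 %b) {
    %t    = call { i32, i1 } @llvm.smul.with.overflow.i32(i32 %a, i32 %b)
    %obit = extractvalue { i32, i1 } %t, 1    ; the overflow bit
    ret i1 %obit
  }

  ; Expected selection, roughly:
  ;   imull %esi, %edi    ; imul sets OF on signed overflow; the (implicit
  ;                       ; EFLAGS) patterns below let ISel use that result
  ;   seto  %al           ; X86::COND_O for the signed forms
  ;   ret
  ; The unsigned forms (UADDO/USUBO/UMULO) test CF instead (setb, X86::COND_C).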
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@60800 91177308-0d34-0410-b5e6-96231b3b80d8
diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp
index 44f28d3..7e6ad09 100644
--- a/lib/Target/X86/X86ISelLowering.cpp
+++ b/lib/Target/X86/X86ISelLowering.cpp
@@ -780,11 +780,19 @@
// We want to custom lower some of our intrinsics.
setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
- // Add with overflow operations are custom lowered.
+ // Add/Sub/Mul with overflow operations are custom lowered.
setOperationAction(ISD::SADDO, MVT::i32, Custom);
setOperationAction(ISD::SADDO, MVT::i64, Custom);
setOperationAction(ISD::UADDO, MVT::i32, Custom);
setOperationAction(ISD::UADDO, MVT::i64, Custom);
+ setOperationAction(ISD::SSUBO, MVT::i32, Custom);
+ setOperationAction(ISD::SSUBO, MVT::i64, Custom);
+ setOperationAction(ISD::USUBO, MVT::i32, Custom);
+ setOperationAction(ISD::USUBO, MVT::i64, Custom);
+ setOperationAction(ISD::SMULO, MVT::i32, Custom);
+ setOperationAction(ISD::SMULO, MVT::i64, Custom);
+ setOperationAction(ISD::UMULO, MVT::i32, Custom);
+ setOperationAction(ISD::UMULO, MVT::i64, Custom);
// We have target-specific dag combine patterns for the following nodes:
setTargetDAGCombine(ISD::VECTOR_SHUFFLE);
@@ -5202,8 +5210,10 @@
if (Cond.getOpcode() == ISD::SETCC)
Cond = LowerSETCC(Cond, DAG);
- else if (Cond.getOpcode() == ISD::SADDO || Cond.getOpcode() == ISD::UADDO)
- Cond = LowerXADDO(Cond, DAG);
+ else if (Cond.getOpcode() == ISD::SADDO || Cond.getOpcode() == ISD::UADDO ||
+ Cond.getOpcode() == ISD::SSUBO || Cond.getOpcode() == ISD::USUBO ||
+ Cond.getOpcode() == ISD::SMULO || Cond.getOpcode() == ISD::UMULO)
+ Cond = LowerXALUO(Cond, DAG);
// If condition flag is set by a X86ISD::CMP, then use it as the condition
// setting operand in place of the X86ISD::SETCC.
@@ -6118,23 +6128,52 @@
return Op;
}
-SDValue X86TargetLowering::LowerXADDO(SDValue Op, SelectionDAG &DAG) {
- // Lower the "add with overflow" instruction into a regular "add" plus a
- // "setcc" instruction that checks the overflow flag. The "brcond" lowering
+SDValue X86TargetLowering::LowerXALUO(SDValue Op, SelectionDAG &DAG) {
+ // Lower the "add/sub/mul with overflow" instruction into a regular ALU op plus
+ // a "setcc" instruction that checks the overflow flag. The "brcond" lowering
// looks for this combo and may remove the "setcc" instruction if the "setcc"
// has only one use.
SDNode *N = Op.getNode();
SDValue LHS = N->getOperand(0);
SDValue RHS = N->getOperand(1);
+ unsigned BaseOp = 0;
+ unsigned Cond = 0;
+
+ switch (Op.getOpcode()) {
+ default: assert(0 && "Unknown overflow instruction!");
+ case ISD::SADDO:
+ BaseOp = ISD::ADD;
+ Cond = X86::COND_O;  // ADD reports signed overflow in OF
+ break;
+ case ISD::UADDO:
+ BaseOp = ISD::ADD;
+ Cond = X86::COND_C;  // unsigned carry-out lands in CF
+ break;
+ case ISD::SSUBO:
+ BaseOp = ISD::SUB;
+ Cond = X86::COND_O;
+ break;
+ case ISD::USUBO:
+ BaseOp = ISD::SUB;
+ Cond = X86::COND_C;  // unsigned borrow lands in CF
+ break;
+ case ISD::SMULO:
+ BaseOp = ISD::MUL;
+ Cond = X86::COND_O;
+ break;
+ case ISD::UMULO:
+ BaseOp = ISD::MUL;
+ Cond = X86::COND_C;  // MUL sets CF when the high half is nonzero
+ break;
+ }
// Also sets EFLAGS.
SDVTList VTs = DAG.getVTList(N->getValueType(0), MVT::i32);
- SDValue Sum = DAG.getNode(ISD::ADD, VTs, LHS, RHS);
+ SDValue Sum = DAG.getNode(BaseOp, VTs, LHS, RHS);
SDValue SetCC =
DAG.getNode(X86ISD::SETCC, N->getValueType(1),
- DAG.getConstant((Op.getOpcode() == ISD::SADDO) ?
- X86::COND_O : X86::COND_C,
+ DAG.getConstant(Cond,
MVT::i32), SDValue(Sum.getNode(), 1));
DAG.ReplaceAllUsesOfValueWith(SDValue(N, 1), SetCC);
@@ -6259,8 +6298,12 @@
case ISD::FLT_ROUNDS_: return LowerFLT_ROUNDS_(Op, DAG);
case ISD::CTLZ: return LowerCTLZ(Op, DAG);
case ISD::CTTZ: return LowerCTTZ(Op, DAG);
- case ISD::SADDO: return LowerXADDO(Op, DAG);
- case ISD::UADDO: return LowerXADDO(Op, DAG);
+ case ISD::SADDO:
+ case ISD::UADDO:
+ case ISD::SSUBO:
+ case ISD::USUBO:
+ case ISD::SMULO:
+ case ISD::UMULO: return LowerXALUO(Op, DAG);
case ISD::READCYCLECOUNTER: return LowerREADCYCLECOUNTER(Op, DAG);
}
}
diff --git a/lib/Target/X86/X86ISelLowering.h b/lib/Target/X86/X86ISelLowering.h
index 4a85444..6eb78f6 100644
--- a/lib/Target/X86/X86ISelLowering.h
+++ b/lib/Target/X86/X86ISelLowering.h
@@ -593,7 +593,7 @@
SDValue LowerFLT_ROUNDS_(SDValue Op, SelectionDAG &DAG);
SDValue LowerCTLZ(SDValue Op, SelectionDAG &DAG);
SDValue LowerCTTZ(SDValue Op, SelectionDAG &DAG);
- SDValue LowerXADDO(SDValue Op, SelectionDAG &DAG);
+ SDValue LowerXALUO(SDValue Op, SelectionDAG &DAG);
SDValue LowerCMP_SWAP(SDValue Op, SelectionDAG &DAG);
SDValue LowerLOAD_SUB(SDValue Op, SelectionDAG &DAG);
diff --git a/lib/Target/X86/X86Instr64bit.td b/lib/Target/X86/X86Instr64bit.td
index 5f63e56..d6d08b9 100644
--- a/lib/Target/X86/X86Instr64bit.td
+++ b/lib/Target/X86/X86Instr64bit.td
@@ -379,29 +379,36 @@
let isTwoAddress = 1 in {
def SUB64rr : RI<0x29, MRMDestReg, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
"sub{q}\t{$src2, $dst|$dst, $src2}",
- [(set GR64:$dst, (sub GR64:$src1, GR64:$src2))]>;
+ [(set GR64:$dst, (sub GR64:$src1, GR64:$src2)),
+ (implicit EFLAGS)]>;
def SUB64rm : RI<0x2B, MRMSrcMem, (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
"sub{q}\t{$src2, $dst|$dst, $src2}",
- [(set GR64:$dst, (sub GR64:$src1, (load addr:$src2)))]>;
+ [(set GR64:$dst, (sub GR64:$src1, (load addr:$src2))),
+ (implicit EFLAGS)]>;
def SUB64ri32 : RIi32<0x81, MRM5r, (outs GR64:$dst), (ins GR64:$src1, i64i32imm:$src2),
"sub{q}\t{$src2, $dst|$dst, $src2}",
- [(set GR64:$dst, (sub GR64:$src1, i64immSExt32:$src2))]>;
+ [(set GR64:$dst, (sub GR64:$src1, i64immSExt32:$src2)),
+ (implicit EFLAGS)]>;
def SUB64ri8 : RIi8<0x83, MRM5r, (outs GR64:$dst), (ins GR64:$src1, i64i8imm:$src2),
"sub{q}\t{$src2, $dst|$dst, $src2}",
- [(set GR64:$dst, (sub GR64:$src1, i64immSExt8:$src2))]>;
+ [(set GR64:$dst, (sub GR64:$src1, i64immSExt8:$src2)),
+ (implicit EFLAGS)]>;
} // isTwoAddress
def SUB64mr : RI<0x29, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2),
"sub{q}\t{$src2, $dst|$dst, $src2}",
- [(store (sub (load addr:$dst), GR64:$src2), addr:$dst)]>;
+ [(store (sub (load addr:$dst), GR64:$src2), addr:$dst),
+ (implicit EFLAGS)]>;
def SUB64mi32 : RIi32<0x81, MRM5m, (outs), (ins i64mem:$dst, i64i32imm:$src2),
"sub{q}\t{$src2, $dst|$dst, $src2}",
- [(store (sub (load addr:$dst), i64immSExt32:$src2), addr:$dst)]>;
+ [(store (sub (load addr:$dst), i64immSExt32:$src2), addr:$dst),
+ (implicit EFLAGS)]>;
def SUB64mi8 : RIi8<0x83, MRM5m, (outs), (ins i64mem:$dst, i64i8imm :$src2),
"sub{q}\t{$src2, $dst|$dst, $src2}",
- [(store (sub (load addr:$dst), i64immSExt8:$src2), addr:$dst)]>;
+ [(store (sub (load addr:$dst), i64immSExt8:$src2), addr:$dst),
+ (implicit EFLAGS)]>;
let Uses = [EFLAGS] in {
let isTwoAddress = 1 in {
@@ -454,30 +461,36 @@
let isCommutable = 1 in
def IMUL64rr : RI<0xAF, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
"imul{q}\t{$src2, $dst|$dst, $src2}",
- [(set GR64:$dst, (mul GR64:$src1, GR64:$src2))]>, TB;
+ [(set GR64:$dst, (mul GR64:$src1, GR64:$src2)),
+ (implicit EFLAGS)]>, TB;
def IMUL64rm : RI<0xAF, MRMSrcMem, (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
"imul{q}\t{$src2, $dst|$dst, $src2}",
- [(set GR64:$dst, (mul GR64:$src1, (load addr:$src2)))]>, TB;
+ [(set GR64:$dst, (mul GR64:$src1, (load addr:$src2))),
+ (implicit EFLAGS)]>, TB;
} // isTwoAddress
// Surprisingly enough, these are not two-address instructions!
def IMUL64rri32 : RIi32<0x69, MRMSrcReg, // GR64 = GR64*I32
(outs GR64:$dst), (ins GR64:$src1, i64i32imm:$src2),
"imul{q}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
- [(set GR64:$dst, (mul GR64:$src1, i64immSExt32:$src2))]>;
+ [(set GR64:$dst, (mul GR64:$src1, i64immSExt32:$src2)),
+ (implicit EFLAGS)]>;
def IMUL64rri8 : RIi8<0x6B, MRMSrcReg, // GR64 = GR64*I8
(outs GR64:$dst), (ins GR64:$src1, i64i8imm:$src2),
"imul{q}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
- [(set GR64:$dst, (mul GR64:$src1, i64immSExt8:$src2))]>;
+ [(set GR64:$dst, (mul GR64:$src1, i64immSExt8:$src2)),
+ (implicit EFLAGS)]>;
def IMUL64rmi32 : RIi32<0x69, MRMSrcMem, // GR64 = [mem64]*I32
(outs GR64:$dst), (ins i64mem:$src1, i64i32imm:$src2),
"imul{q}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
- [(set GR64:$dst, (mul (load addr:$src1), i64immSExt32:$src2))]>;
+ [(set GR64:$dst, (mul (load addr:$src1), i64immSExt32:$src2)),
+ (implicit EFLAGS)]>;
def IMUL64rmi8 : RIi8<0x6B, MRMSrcMem, // GR64 = [mem64]*I8
(outs GR64:$dst), (ins i64mem:$src1, i64i8imm: $src2),
"imul{q}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
- [(set GR64:$dst, (mul (load addr:$src1), i64immSExt8:$src2))]>;
+ [(set GR64:$dst, (mul (load addr:$src1), i64immSExt8:$src2)),
+ (implicit EFLAGS)]>;
} // Defs = [EFLAGS]
// Unsigned division / remainder
diff --git a/lib/Target/X86/X86InstrInfo.td b/lib/Target/X86/X86InstrInfo.td
index 37579e8..3834f84 100644
--- a/lib/Target/X86/X86InstrInfo.td
+++ b/lib/Target/X86/X86InstrInfo.td
@@ -709,10 +709,10 @@
// FIXME: Used for 8-bit mul, ignore result upper 8 bits.
// This probably ought to be moved to a def : Pat<> if the
// syntax can be accepted.
- [(set AL, (mul AL, GR8:$src))]>; // AL,AH = AL*GR8
+ [(set AL, (mul AL, GR8:$src))]>; // AL,AH = AL*GR8
let Defs = [AX,DX,EFLAGS], Uses = [AX], neverHasSideEffects = 1 in
-def MUL16r : I<0xF7, MRM4r, (outs), (ins GR16:$src), "mul{w}\t$src", []>,
- OpSize; // AX,DX = AX*GR16
+def MUL16r : I<0xF7, MRM4r, (outs), (ins GR16:$src), "mul{w}\t$src",
+ []>, OpSize; // AX,DX = AX*GR16
let Defs = [EAX,EDX,EFLAGS], Uses = [EAX], neverHasSideEffects = 1 in
def MUL32r : I<0xF7, MRM4r, (outs), (ins GR32:$src), "mul{l}\t$src", []>;
// EAX,EDX = EAX*GR32
@@ -2054,67 +2054,82 @@
def SUB8rr : I<0x28, MRMDestReg, (outs GR8 :$dst), (ins GR8 :$src1, GR8 :$src2),
"sub{b}\t{$src2, $dst|$dst, $src2}",
- [(set GR8:$dst, (sub GR8:$src1, GR8:$src2))]>;
+ [(set GR8:$dst, (sub GR8:$src1, GR8:$src2)),
+ (implicit EFLAGS)]>;
def SUB16rr : I<0x29, MRMDestReg, (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
"sub{w}\t{$src2, $dst|$dst, $src2}",
- [(set GR16:$dst, (sub GR16:$src1, GR16:$src2))]>, OpSize;
+ [(set GR16:$dst, (sub GR16:$src1, GR16:$src2)),
+ (implicit EFLAGS)]>, OpSize;
def SUB32rr : I<0x29, MRMDestReg, (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
"sub{l}\t{$src2, $dst|$dst, $src2}",
- [(set GR32:$dst, (sub GR32:$src1, GR32:$src2))]>;
+ [(set GR32:$dst, (sub GR32:$src1, GR32:$src2)),
+ (implicit EFLAGS)]>;
def SUB8rm : I<0x2A, MRMSrcMem, (outs GR8 :$dst), (ins GR8 :$src1, i8mem :$src2),
"sub{b}\t{$src2, $dst|$dst, $src2}",
- [(set GR8:$dst, (sub GR8:$src1, (load addr:$src2)))]>;
+ [(set GR8:$dst, (sub GR8:$src1, (load addr:$src2))),
+ (implicit EFLAGS)]>;
def SUB16rm : I<0x2B, MRMSrcMem, (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
"sub{w}\t{$src2, $dst|$dst, $src2}",
- [(set GR16:$dst, (sub GR16:$src1, (load addr:$src2)))]>, OpSize;
+ [(set GR16:$dst, (sub GR16:$src1, (load addr:$src2))),
+ (implicit EFLAGS)]>, OpSize;
def SUB32rm : I<0x2B, MRMSrcMem, (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
"sub{l}\t{$src2, $dst|$dst, $src2}",
- [(set GR32:$dst, (sub GR32:$src1, (load addr:$src2)))]>;
+ [(set GR32:$dst, (sub GR32:$src1, (load addr:$src2))),
+ (implicit EFLAGS)]>;
def SUB8ri : Ii8 <0x80, MRM5r, (outs GR8:$dst), (ins GR8:$src1, i8imm:$src2),
"sub{b}\t{$src2, $dst|$dst, $src2}",
- [(set GR8:$dst, (sub GR8:$src1, imm:$src2))]>;
+ [(set GR8:$dst, (sub GR8:$src1, imm:$src2)),
+ (implicit EFLAGS)]>;
def SUB16ri : Ii16<0x81, MRM5r, (outs GR16:$dst), (ins GR16:$src1, i16imm:$src2),
"sub{w}\t{$src2, $dst|$dst, $src2}",
- [(set GR16:$dst, (sub GR16:$src1, imm:$src2))]>, OpSize;
+ [(set GR16:$dst, (sub GR16:$src1, imm:$src2)),
+ (implicit EFLAGS)]>, OpSize;
def SUB32ri : Ii32<0x81, MRM5r, (outs GR32:$dst), (ins GR32:$src1, i32imm:$src2),
"sub{l}\t{$src2, $dst|$dst, $src2}",
- [(set GR32:$dst, (sub GR32:$src1, imm:$src2))]>;
+ [(set GR32:$dst, (sub GR32:$src1, imm:$src2)),
+ (implicit EFLAGS)]>;
def SUB16ri8 : Ii8<0x83, MRM5r, (outs GR16:$dst), (ins GR16:$src1, i16i8imm:$src2),
"sub{w}\t{$src2, $dst|$dst, $src2}",
- [(set GR16:$dst, (sub GR16:$src1, i16immSExt8:$src2))]>,
- OpSize;
+ [(set GR16:$dst, (sub GR16:$src1, i16immSExt8:$src2)),
+ (implicit EFLAGS)]>, OpSize;
def SUB32ri8 : Ii8<0x83, MRM5r, (outs GR32:$dst), (ins GR32:$src1, i32i8imm:$src2),
"sub{l}\t{$src2, $dst|$dst, $src2}",
- [(set GR32:$dst, (sub GR32:$src1, i32immSExt8:$src2))]>;
+ [(set GR32:$dst, (sub GR32:$src1, i32immSExt8:$src2)),
+ (implicit EFLAGS)]>;
let isTwoAddress = 0 in {
def SUB8mr : I<0x28, MRMDestMem, (outs), (ins i8mem :$dst, GR8 :$src2),
"sub{b}\t{$src2, $dst|$dst, $src2}",
- [(store (sub (load addr:$dst), GR8:$src2), addr:$dst)]>;
+ [(store (sub (load addr:$dst), GR8:$src2), addr:$dst),
+ (implicit EFLAGS)]>;
def SUB16mr : I<0x29, MRMDestMem, (outs), (ins i16mem:$dst, GR16:$src2),
"sub{w}\t{$src2, $dst|$dst, $src2}",
- [(store (sub (load addr:$dst), GR16:$src2), addr:$dst)]>,
- OpSize;
+ [(store (sub (load addr:$dst), GR16:$src2), addr:$dst),
+ (implicit EFLAGS)]>, OpSize;
def SUB32mr : I<0x29, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src2),
"sub{l}\t{$src2, $dst|$dst, $src2}",
- [(store (sub (load addr:$dst), GR32:$src2), addr:$dst)]>;
+ [(store (sub (load addr:$dst), GR32:$src2), addr:$dst),
+ (implicit EFLAGS)]>;
def SUB8mi : Ii8<0x80, MRM5m, (outs), (ins i8mem :$dst, i8imm:$src2),
"sub{b}\t{$src2, $dst|$dst, $src2}",
- [(store (sub (loadi8 addr:$dst), imm:$src2), addr:$dst)]>;
+ [(store (sub (loadi8 addr:$dst), imm:$src2), addr:$dst),
+ (implicit EFLAGS)]>;
def SUB16mi : Ii16<0x81, MRM5m, (outs), (ins i16mem:$dst, i16imm:$src2),
"sub{w}\t{$src2, $dst|$dst, $src2}",
- [(store (sub (loadi16 addr:$dst), imm:$src2), addr:$dst)]>,
- OpSize;
+ [(store (sub (loadi16 addr:$dst), imm:$src2), addr:$dst),
+ (implicit EFLAGS)]>, OpSize;
def SUB32mi : Ii32<0x81, MRM5m, (outs), (ins i32mem:$dst, i32imm:$src2),
"sub{l}\t{$src2, $dst|$dst, $src2}",
- [(store (sub (loadi32 addr:$dst), imm:$src2), addr:$dst)]>;
+ [(store (sub (loadi32 addr:$dst), imm:$src2), addr:$dst),
+ (implicit EFLAGS)]>;
def SUB16mi8 : Ii8<0x83, MRM5m, (outs), (ins i16mem:$dst, i16i8imm :$src2),
"sub{w}\t{$src2, $dst|$dst, $src2}",
- [(store (sub (load addr:$dst), i16immSExt8:$src2), addr:$dst)]>,
- OpSize;
+ [(store (sub (load addr:$dst), i16immSExt8:$src2), addr:$dst),
+ (implicit EFLAGS)]>, OpSize;
def SUB32mi8 : Ii8<0x83, MRM5m, (outs), (ins i32mem:$dst, i32i8imm :$src2),
"sub{l}\t{$src2, $dst|$dst, $src2}",
- [(store (sub (load addr:$dst), i32immSExt8:$src2), addr:$dst)]>;
+ [(store (sub (load addr:$dst), i32immSExt8:$src2), addr:$dst),
+ (implicit EFLAGS)]>;
}
let Uses = [EFLAGS] in {
@@ -2152,18 +2167,22 @@
let isCommutable = 1 in { // X = IMUL Y, Z --> X = IMUL Z, Y
def IMUL16rr : I<0xAF, MRMSrcReg, (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
"imul{w}\t{$src2, $dst|$dst, $src2}",
- [(set GR16:$dst, (mul GR16:$src1, GR16:$src2))]>, TB, OpSize;
+ [(set GR16:$dst, (mul GR16:$src1, GR16:$src2)),
+ (implicit EFLAGS)]>, TB, OpSize;
def IMUL32rr : I<0xAF, MRMSrcReg, (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
"imul{l}\t{$src2, $dst|$dst, $src2}",
- [(set GR32:$dst, (mul GR32:$src1, GR32:$src2))]>, TB;
+ [(set GR32:$dst, (mul GR32:$src1, GR32:$src2)),
+ (implicit EFLAGS)]>, TB;
}
def IMUL16rm : I<0xAF, MRMSrcMem, (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
"imul{w}\t{$src2, $dst|$dst, $src2}",
- [(set GR16:$dst, (mul GR16:$src1, (load addr:$src2)))]>,
+ [(set GR16:$dst, (mul GR16:$src1, (load addr:$src2))),
+ (implicit EFLAGS)]>,
TB, OpSize;
def IMUL32rm : I<0xAF, MRMSrcMem, (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
"imul{l}\t{$src2, $dst|$dst, $src2}",
- [(set GR32:$dst, (mul GR32:$src1, (load addr:$src2)))]>, TB;
+ [(set GR32:$dst, (mul GR32:$src1, (load addr:$src2))),
+ (implicit EFLAGS)]>, TB;
} // Defs = [EFLAGS]
} // end Two Address instructions
@@ -2172,39 +2191,44 @@
def IMUL16rri : Ii16<0x69, MRMSrcReg, // GR16 = GR16*I16
(outs GR16:$dst), (ins GR16:$src1, i16imm:$src2),
"imul{w}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
- [(set GR16:$dst, (mul GR16:$src1, imm:$src2))]>, OpSize;
+ [(set GR16:$dst, (mul GR16:$src1, imm:$src2)),
+ (implicit EFLAGS)]>, OpSize;
def IMUL32rri : Ii32<0x69, MRMSrcReg, // GR32 = GR32*I32
(outs GR32:$dst), (ins GR32:$src1, i32imm:$src2),
"imul{l}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
- [(set GR32:$dst, (mul GR32:$src1, imm:$src2))]>;
+ [(set GR32:$dst, (mul GR32:$src1, imm:$src2)),
+ (implicit EFLAGS)]>;
def IMUL16rri8 : Ii8<0x6B, MRMSrcReg, // GR16 = GR16*I8
(outs GR16:$dst), (ins GR16:$src1, i16i8imm:$src2),
"imul{w}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
- [(set GR16:$dst, (mul GR16:$src1, i16immSExt8:$src2))]>,
- OpSize;
+ [(set GR16:$dst, (mul GR16:$src1, i16immSExt8:$src2)),
+ (implicit EFLAGS)]>, OpSize;
def IMUL32rri8 : Ii8<0x6B, MRMSrcReg, // GR32 = GR32*I8
(outs GR32:$dst), (ins GR32:$src1, i32i8imm:$src2),
"imul{l}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
- [(set GR32:$dst, (mul GR32:$src1, i32immSExt8:$src2))]>;
+ [(set GR32:$dst, (mul GR32:$src1, i32immSExt8:$src2)),
+ (implicit EFLAGS)]>;
def IMUL16rmi : Ii16<0x69, MRMSrcMem, // GR16 = [mem16]*I16
(outs GR16:$dst), (ins i16mem:$src1, i16imm:$src2),
"imul{w}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
- [(set GR16:$dst, (mul (load addr:$src1), imm:$src2))]>,
- OpSize;
+ [(set GR16:$dst, (mul (load addr:$src1), imm:$src2)),
+ (implicit EFLAGS)]>, OpSize;
def IMUL32rmi : Ii32<0x69, MRMSrcMem, // GR32 = [mem32]*I32
(outs GR32:$dst), (ins i32mem:$src1, i32imm:$src2),
"imul{l}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
- [(set GR32:$dst, (mul (load addr:$src1), imm:$src2))]>;
+ [(set GR32:$dst, (mul (load addr:$src1), imm:$src2)),
+ (implicit EFLAGS)]>;
def IMUL16rmi8 : Ii8<0x6B, MRMSrcMem, // GR16 = [mem16]*I8
(outs GR16:$dst), (ins i16mem:$src1, i16i8imm :$src2),
"imul{w}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
- [(set GR16:$dst, (mul (load addr:$src1), i16immSExt8:$src2))]>,
- OpSize;
+ [(set GR16:$dst, (mul (load addr:$src1), i16immSExt8:$src2)),
+ (implicit EFLAGS)]>, OpSize;
def IMUL32rmi8 : Ii8<0x6B, MRMSrcMem, // GR32 = [mem32]*I8
(outs GR32:$dst), (ins i32mem:$src1, i32i8imm: $src2),
"imul{l}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
- [(set GR32:$dst, (mul (load addr:$src1), i32immSExt8:$src2))]>;
+ [(set GR32:$dst, (mul (load addr:$src1), i32immSExt8:$src2)),
+ (implicit EFLAGS)]>;
} // Defs = [EFLAGS]
//===----------------------------------------------------------------------===//