Revert r55018 and apply the correct "fix" for the 64-bit sub_and_fetch atomic: mark ISD::ATOMIC_LOAD_SUB on i64 as Expand, just like the 8-, 16-, and 32-bit sub_and_fetches, and drop the LXSUB instruction definitions and atomic_load_sub_* PatFrags that are no longer matched.
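
For reference, a minimal sketch of the kind of source that reaches ISD::ATOMIC_LOAD_SUB with an i64 operand (it uses the GCC/Clang-style __sync_sub_and_fetch builtin; the "negate + reuse the atomic-add path" lowering described in the comments is the expected effect of Expand, not something this patch spells out):

  // Exercises the 64-bit atomic subtract path on x86-64.  With
  // ISD::ATOMIC_LOAD_SUB marked Expand for i64, the legalizer is expected to
  // rewrite the subtraction as an atomic add of the negated operand, so no
  // dedicated LXSUB64 pattern is needed.
  #include <cstdint>
  #include <cstdio>

  int main() {
    int64_t counter = 10;
    // Atomically subtract 3 and return the new value.
    int64_t now = __sync_sub_and_fetch(&counter, static_cast<int64_t>(3));
    std::printf("counter = %lld\n", static_cast<long long>(now));
    return 0;
  }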


git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@55023 91177308-0d34-0410-b5e6-96231b3b80d8
diff --git a/lib/Target/TargetSelectionDAG.td b/lib/Target/TargetSelectionDAG.td
index eaca86a..5dba0bc 100644
--- a/lib/Target/TargetSelectionDAG.td
+++ b/lib/Target/TargetSelectionDAG.td
@@ -358,10 +358,10 @@
                     [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
 def atomic_load_add : SDNode<"ISD::ATOMIC_LOAD_ADD" , STDAtomic2,
                     [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
-def atomic_load_sub : SDNode<"ISD::ATOMIC_LOAD_SUB" , STDAtomic2,
-                    [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
 def atomic_swap     : SDNode<"ISD::ATOMIC_SWAP", STDAtomic2,
                     [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
+def atomic_load_sub : SDNode<"ISD::ATOMIC_LOAD_SUB" , STDAtomic2,
+                    [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
 def atomic_load_and : SDNode<"ISD::ATOMIC_LOAD_AND" , STDAtomic2,
                     [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
 def atomic_load_or  : SDNode<"ISD::ATOMIC_LOAD_OR" , STDAtomic2,
@@ -815,32 +815,6 @@
   return false;
 }]>;
 
-def atomic_load_sub_8 : PatFrag<(ops node:$ptr, node:$dec),
-                    (atomic_load_sub node:$ptr, node:$dec), [{
-  if (AtomicSDNode* V = dyn_cast<AtomicSDNode>(N))
-        return V->getValueType(0) == MVT::i8;
-  return false;
-}]>;
-def atomic_load_sub_16 : PatFrag<(ops node:$ptr, node:$dec), 
-                    (atomic_load_sub node:$ptr, node:$dec), [{
-  if (AtomicSDNode* V = dyn_cast<AtomicSDNode>(N))
-        return V->getValueType(0) == MVT::i16;
-  return false;
-}]>;
-def atomic_load_sub_32 : PatFrag<(ops node:$ptr, node:$dec), 
-                    (atomic_load_sub node:$ptr, node:$dec), [{
-  if (AtomicSDNode* V = dyn_cast<AtomicSDNode>(N))
-        return V->getValueType(0) == MVT::i32;
-  return false;
-}]>;
-def atomic_load_sub_64 : PatFrag<(ops node:$ptr, node:$dec), 
-                    (atomic_load_sub node:$ptr, node:$dec), [{
-  if (AtomicSDNode* V = dyn_cast<AtomicSDNode>(N))
-        return V->getValueType(0) == MVT::i64;
-  return false;
-}]>;
-
-
 def atomic_swap_8 : PatFrag<(ops node:$ptr, node:$inc),
                     (atomic_swap node:$ptr, node:$inc), [{
   if (AtomicSDNode* V = dyn_cast<AtomicSDNode>(N))
@@ -867,6 +841,7 @@
 }]>;
 
 
+
 // setcc convenience fragments.
 def setoeq : PatFrag<(ops node:$lhs, node:$rhs),
                      (setcc node:$lhs, node:$rhs, SETOEQ)>;
diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp
index 7cf74bf..2fb9a2e 100644
--- a/lib/Target/X86/X86ISelLowering.cpp
+++ b/lib/Target/X86/X86ISelLowering.cpp
@@ -297,9 +297,11 @@
   setOperationAction(ISD::ATOMIC_CMP_SWAP , MVT::i16, Custom);
   setOperationAction(ISD::ATOMIC_CMP_SWAP , MVT::i32, Custom);
   setOperationAction(ISD::ATOMIC_CMP_SWAP , MVT::i64, Custom);
+
   setOperationAction(ISD::ATOMIC_LOAD_SUB , MVT::i8, Expand);
   setOperationAction(ISD::ATOMIC_LOAD_SUB , MVT::i16, Expand);
   setOperationAction(ISD::ATOMIC_LOAD_SUB , MVT::i32, Expand);
+  setOperationAction(ISD::ATOMIC_LOAD_SUB , MVT::i64, Expand);
 
   // Use the default ISD::DBG_STOPPOINT, ISD::DECLARE expansion.
   setOperationAction(ISD::DBG_STOPPOINT, MVT::Other, Expand);
diff --git a/lib/Target/X86/X86Instr64bit.td b/lib/Target/X86/X86Instr64bit.td
index 81abc29..e49a548 100644
--- a/lib/Target/X86/X86Instr64bit.td
+++ b/lib/Target/X86/X86Instr64bit.td
@@ -1143,13 +1143,6 @@
                "lock\n\txadd\t$val, $ptr", 
                [(set GR64:$dst, (atomic_load_add_64 addr:$ptr, GR64:$val))]>,
                 TB, LOCK;
-
-let Defs = [EFLAGS] in
-def LXSUB64 : RI<0xC1, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$ptr,GR64:$val),
-               "lock\n\txadd\t$val, $ptr", 
-               [(set GR64:$dst, (atomic_load_sub_64 addr:$ptr, GR64:$val))]>,
-                TB, LOCK;
-
 def XCHG64rm : RI<0x87, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$ptr,GR64:$val),
                   "xchg\t$val, $ptr", 
                   [(set GR64:$dst, (atomic_swap_64 addr:$ptr, GR64:$val))]>;
diff --git a/lib/Target/X86/X86InstrInfo.td b/lib/Target/X86/X86InstrInfo.td
index 7b5ee91..37a5fed 100644
--- a/lib/Target/X86/X86InstrInfo.td
+++ b/lib/Target/X86/X86InstrInfo.td
@@ -2634,22 +2634,6 @@
                 TB, LOCK;
 }
 
-// Atomic exchange and subtract
-let Constraints = "$val = $dst", Defs = [EFLAGS] in {
-def LXSUB32 : I<0xC1, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$ptr, GR32:$val),
-               "lock\n\txadd{l}\t{$val, $ptr|$ptr, $val}", 
-               [(set GR32:$dst, (atomic_load_sub_32 addr:$ptr, GR32:$val))]>,
-                TB, LOCK;
-def LXSUB16 : I<0xC1, MRMSrcMem, (outs GR16:$dst), (ins i16mem:$ptr, GR16:$val),
-               "lock\n\txadd{w}\t{$val, $ptr|$ptr, $val}", 
-               [(set GR16:$dst, (atomic_load_sub_16 addr:$ptr, GR16:$val))]>,
-                TB, OpSize, LOCK;
-def LXSUB8  : I<0xC0, MRMSrcMem, (outs GR8:$dst), (ins i8mem:$ptr, GR8:$val),
-               "lock\n\txadd{b}\t{$val, $ptr|$ptr, $val}", 
-               [(set GR8:$dst, (atomic_load_sub_8 addr:$ptr, GR8:$val))]>,
-                TB, LOCK;
-}
-
 // Atomic exchange, and, or, xor
 let Constraints = "$val = $dst", Defs = [EFLAGS],
                   usesCustomDAGSchedInserter = 1 in {