Split the ATOMIC NodeTypes to include the size, e.g.
ATOMIC_LOAD_ADD_{8,16,32,64} instead of ATOMIC_LOAD_ADD.
Increased the hardcoded constant OpActionsCapacity to match.
Large but boring; no functional change.
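
(An illustrative sketch of the resulting opcode layout; only the names
that appear in this message and in the hunks below are from the patch,
the surrounding enum shape is paraphrased:)

    enum NodeType {
      // ...
      // One opcode per (operation, size) pair; the access size is now
      // recorded in the opcode itself rather than only in the value type.
      ATOMIC_CMP_SWAP_8,  ATOMIC_CMP_SWAP_16,
      ATOMIC_CMP_SWAP_32, ATOMIC_CMP_SWAP_64,
      ATOMIC_LOAD_ADD_8,  ATOMIC_LOAD_ADD_16,
      ATOMIC_LOAD_ADD_32, ATOMIC_LOAD_ADD_64,
      ATOMIC_LOAD_SUB_8,  ATOMIC_LOAD_SUB_16,
      ATOMIC_LOAD_SUB_32, ATOMIC_LOAD_SUB_64,
      // ... same _8/_16/_32/_64 split for the remaining atomic ops
    };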

This is to support partial-word atomics on ppc; i8 is not a valid
type there, so by the time we get to lowering, the ATOMIC_LOAD nodes
look the same whether the original type was i8 or i32. The size
information could instead be stored on AtomicSDNode, but that is
already the largest SDNode; I don't fully understand the SDNode
allocation scheme, but it is sensitive to the size of the largest
node, so increasing that seems like a bad idea. This is the
alternative.
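
(The X86 hunks below pick the sized opcode from the value type with an
inline ternary chain; the same selection written as a hypothetical
helper, purely for clarity. It is not part of the patch:)

    // Hypothetical helper: map a value type to the sized
    // ATOMIC_LOAD_ADD opcode, mirroring the inline chain in the
    // ATOMIC_LOAD_SUB expansion below.
    static unsigned SizedAtomicLoadAdd(MVT T) {
      return T==MVT::i8  ? ISD::ATOMIC_LOAD_ADD_8  :
             T==MVT::i16 ? ISD::ATOMIC_LOAD_ADD_16 :
             T==MVT::i32 ? ISD::ATOMIC_LOAD_ADD_32 :
             T==MVT::i64 ? ISD::ATOMIC_LOAD_ADD_64 : 0;
    }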

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@55457 91177308-0d34-0410-b5e6-96231b3b80d8
diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp
index 9cc1bed..3bc5592 100644
--- a/lib/Target/X86/X86ISelLowering.cpp
+++ b/lib/Target/X86/X86ISelLowering.cpp
@@ -293,15 +293,15 @@
     setOperationAction(ISD::MEMBARRIER    , MVT::Other, Expand);
 
   // Expand certain atomics
-  setOperationAction(ISD::ATOMIC_CMP_SWAP , MVT::i8, Custom);
-  setOperationAction(ISD::ATOMIC_CMP_SWAP , MVT::i16, Custom);
-  setOperationAction(ISD::ATOMIC_CMP_SWAP , MVT::i32, Custom);
-  setOperationAction(ISD::ATOMIC_CMP_SWAP , MVT::i64, Custom);
+  setOperationAction(ISD::ATOMIC_CMP_SWAP_8 , MVT::i8, Custom);
+  setOperationAction(ISD::ATOMIC_CMP_SWAP_16, MVT::i16, Custom);
+  setOperationAction(ISD::ATOMIC_CMP_SWAP_32, MVT::i32, Custom);
+  setOperationAction(ISD::ATOMIC_CMP_SWAP_64, MVT::i64, Custom);
 
-  setOperationAction(ISD::ATOMIC_LOAD_SUB , MVT::i8, Expand);
-  setOperationAction(ISD::ATOMIC_LOAD_SUB , MVT::i16, Expand);
-  setOperationAction(ISD::ATOMIC_LOAD_SUB , MVT::i32, Expand);
-  setOperationAction(ISD::ATOMIC_LOAD_SUB , MVT::i64, Expand);
+  setOperationAction(ISD::ATOMIC_LOAD_SUB_8, MVT::i8, Expand);
+  setOperationAction(ISD::ATOMIC_LOAD_SUB_16, MVT::i16, Expand);
+  setOperationAction(ISD::ATOMIC_LOAD_SUB_32, MVT::i32, Expand);
+  setOperationAction(ISD::ATOMIC_LOAD_SUB_64, MVT::i64, Expand);
 
   // Use the default ISD::DBG_STOPPOINT, ISD::DECLARE expansion.
   setOperationAction(ISD::DBG_STOPPOINT, MVT::Other, Expand);
@@ -5914,8 +5914,11 @@
   MVT T = Op->getValueType(0);
   SDValue negOp = DAG.getNode(ISD::SUB, T,
                                 DAG.getConstant(0, T), Op->getOperand(2));
-  return DAG.getAtomic(ISD::ATOMIC_LOAD_ADD, Op->getOperand(0),
-                       Op->getOperand(1), negOp,
+  return DAG.getAtomic((T==MVT::i8 ? ISD::ATOMIC_LOAD_ADD_8:
+                        T==MVT::i16 ? ISD::ATOMIC_LOAD_ADD_16:
+                        T==MVT::i32 ? ISD::ATOMIC_LOAD_ADD_32:
+                        T==MVT::i64 ? ISD::ATOMIC_LOAD_ADD_64: 0),
+                       Op->getOperand(0), Op->getOperand(1), negOp,
                        cast<AtomicSDNode>(Op)->getSrcValue(),
                        cast<AtomicSDNode>(Op)->getAlignment()).Val;
 }
@@ -5925,7 +5928,10 @@
 SDValue X86TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) {
   switch (Op.getOpcode()) {
   default: assert(0 && "Should not custom lower this!");
-  case ISD::ATOMIC_CMP_SWAP:    return LowerCMP_SWAP(Op,DAG);
+  case ISD::ATOMIC_CMP_SWAP_8:  return LowerCMP_SWAP(Op,DAG);
+  case ISD::ATOMIC_CMP_SWAP_16: return LowerCMP_SWAP(Op,DAG);
+  case ISD::ATOMIC_CMP_SWAP_32: return LowerCMP_SWAP(Op,DAG);
+  case ISD::ATOMIC_CMP_SWAP_64: return LowerCMP_SWAP(Op,DAG);
   case ISD::BUILD_VECTOR:       return LowerBUILD_VECTOR(Op, DAG);
   case ISD::VECTOR_SHUFFLE:     return LowerVECTOR_SHUFFLE(Op, DAG);
   case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG);
@@ -5979,8 +5985,11 @@
   default: assert(0 && "Should not custom lower this!");
   case ISD::FP_TO_SINT:         return ExpandFP_TO_SINT(N, DAG);
   case ISD::READCYCLECOUNTER:   return ExpandREADCYCLECOUNTER(N, DAG);
-  case ISD::ATOMIC_CMP_SWAP:    return ExpandATOMIC_CMP_SWAP(N, DAG);
-  case ISD::ATOMIC_LOAD_SUB:    return ExpandATOMIC_LOAD_SUB(N,DAG);
+  case ISD::ATOMIC_CMP_SWAP_64: return ExpandATOMIC_CMP_SWAP(N, DAG);
+  case ISD::ATOMIC_LOAD_SUB_8:  return ExpandATOMIC_LOAD_SUB(N,DAG);
+  case ISD::ATOMIC_LOAD_SUB_16: return ExpandATOMIC_LOAD_SUB(N,DAG);
+  case ISD::ATOMIC_LOAD_SUB_32: return ExpandATOMIC_LOAD_SUB(N,DAG);
+  case ISD::ATOMIC_LOAD_SUB_64: return ExpandATOMIC_LOAD_SUB(N,DAG);
   }
 }
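
(Aside on the ATOMIC_LOAD_SUB expansion above: it rewrites an atomic
subtract as an atomic add of the negated operand, presumably because
x86's XADD primitive only adds. A minimal standalone C++ sketch of the
identity it relies on, not taken from the patch:)

    #include <atomic>
    #include <cassert>

    int main() {
      std::atomic<int> a(10), b(10);
      int x = 3;
      int old1 = a.fetch_sub(x);     // atomic subtract
      int old2 = b.fetch_add(-x);    // atomic add of the negation
      assert(old1 == old2);          // both return the old value, 10
      assert(a.load() == b.load());  // both leave 7 behind
      return 0;
    }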