Split the ATOMIC NodeType's to include the size, e.g.
ATOMIC_LOAD_ADD_{8,16,32,64} instead of ATOMIC_LOAD_ADD.
Increased the hardcoded constant OpActionsCapacity to match.
Large but boring; no functional change.
This is to support partial-word atomics on ppc; i8 is
not a valid type there, so by the time we get to lowering, the
ATOMIC_LOAD nodes look the same whether the type was i8 or i32.
The information can be added to the AtomicSDNode, but that is the
largest SDNode; I don't fully understand the SDNode allocation,
but it is sensitive to the largest node size, so increasing
that must be bad. This is the alternative.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@55457 91177308-0d34-0410-b5e6-96231b3b80d8
diff --git a/lib/Target/TargetSelectionDAG.td b/lib/Target/TargetSelectionDAG.td
index e85589f..230c956 100644
--- a/lib/Target/TargetSelectionDAG.td
+++ b/lib/Target/TargetSelectionDAG.td
@@ -358,30 +358,101 @@
def membarrier : SDNode<"ISD::MEMBARRIER" , STDMemBarrier,
[SDNPHasChain, SDNPSideEffect]>;
-// Do not use atomic_* directly, use atomic_*_size (see below)
-def atomic_cmp_swap : SDNode<"ISD::ATOMIC_CMP_SWAP" , STDAtomic3,
+def atomic_cmp_swap_8 : SDNode<"ISD::ATOMIC_CMP_SWAP_8" , STDAtomic3,
[SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
-def atomic_load_add : SDNode<"ISD::ATOMIC_LOAD_ADD" , STDAtomic2,
+def atomic_load_add_8 : SDNode<"ISD::ATOMIC_LOAD_ADD_8" , STDAtomic2,
[SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
-def atomic_swap : SDNode<"ISD::ATOMIC_SWAP", STDAtomic2,
+def atomic_swap_8 : SDNode<"ISD::ATOMIC_SWAP_8", STDAtomic2,
[SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
-def atomic_load_sub : SDNode<"ISD::ATOMIC_LOAD_SUB" , STDAtomic2,
+def atomic_load_sub_8 : SDNode<"ISD::ATOMIC_LOAD_SUB_8" , STDAtomic2,
[SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
-def atomic_load_and : SDNode<"ISD::ATOMIC_LOAD_AND" , STDAtomic2,
+def atomic_load_and_8 : SDNode<"ISD::ATOMIC_LOAD_AND_8" , STDAtomic2,
[SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
-def atomic_load_or : SDNode<"ISD::ATOMIC_LOAD_OR" , STDAtomic2,
+def atomic_load_or_8 : SDNode<"ISD::ATOMIC_LOAD_OR_8" , STDAtomic2,
[SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
-def atomic_load_xor : SDNode<"ISD::ATOMIC_LOAD_XOR" , STDAtomic2,
+def atomic_load_xor_8 : SDNode<"ISD::ATOMIC_LOAD_XOR_8" , STDAtomic2,
[SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
-def atomic_load_nand: SDNode<"ISD::ATOMIC_LOAD_NAND", STDAtomic2,
+def atomic_load_nand_8: SDNode<"ISD::ATOMIC_LOAD_NAND_8", STDAtomic2,
[SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
-def atomic_load_min : SDNode<"ISD::ATOMIC_LOAD_MIN", STDAtomic2,
+def atomic_load_min_8 : SDNode<"ISD::ATOMIC_LOAD_MIN_8", STDAtomic2,
[SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
-def atomic_load_max : SDNode<"ISD::ATOMIC_LOAD_MAX", STDAtomic2,
+def atomic_load_max_8 : SDNode<"ISD::ATOMIC_LOAD_MAX_8", STDAtomic2,
[SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
-def atomic_load_umin : SDNode<"ISD::ATOMIC_LOAD_UMIN", STDAtomic2,
+def atomic_load_umin_8 : SDNode<"ISD::ATOMIC_LOAD_UMIN_8", STDAtomic2,
[SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
-def atomic_load_umax : SDNode<"ISD::ATOMIC_LOAD_UMAX", STDAtomic2,
+def atomic_load_umax_8 : SDNode<"ISD::ATOMIC_LOAD_UMAX_8", STDAtomic2,
+ [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
+def atomic_cmp_swap_16 : SDNode<"ISD::ATOMIC_CMP_SWAP_16" , STDAtomic3,
+ [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
+def atomic_load_add_16 : SDNode<"ISD::ATOMIC_LOAD_ADD_16" , STDAtomic2,
+ [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
+def atomic_swap_16 : SDNode<"ISD::ATOMIC_SWAP_16", STDAtomic2,
+ [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
+def atomic_load_sub_16 : SDNode<"ISD::ATOMIC_LOAD_SUB_16" , STDAtomic2,
+ [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
+def atomic_load_and_16 : SDNode<"ISD::ATOMIC_LOAD_AND_16" , STDAtomic2,
+ [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
+def atomic_load_or_16 : SDNode<"ISD::ATOMIC_LOAD_OR_16" , STDAtomic2,
+ [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
+def atomic_load_xor_16 : SDNode<"ISD::ATOMIC_LOAD_XOR_16" , STDAtomic2,
+ [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
+def atomic_load_nand_16: SDNode<"ISD::ATOMIC_LOAD_NAND_16", STDAtomic2,
+ [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
+def atomic_load_min_16 : SDNode<"ISD::ATOMIC_LOAD_MIN_16", STDAtomic2,
+ [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
+def atomic_load_max_16 : SDNode<"ISD::ATOMIC_LOAD_MAX_16", STDAtomic2,
+ [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
+def atomic_load_umin_16 : SDNode<"ISD::ATOMIC_LOAD_UMIN_16", STDAtomic2,
+ [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
+def atomic_load_umax_16 : SDNode<"ISD::ATOMIC_LOAD_UMAX_16", STDAtomic2,
+ [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
+def atomic_cmp_swap_32 : SDNode<"ISD::ATOMIC_CMP_SWAP_32" , STDAtomic3,
+ [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
+def atomic_load_add_32 : SDNode<"ISD::ATOMIC_LOAD_ADD_32" , STDAtomic2,
+ [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
+def atomic_swap_32 : SDNode<"ISD::ATOMIC_SWAP_32", STDAtomic2,
+ [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
+def atomic_load_sub_32 : SDNode<"ISD::ATOMIC_LOAD_SUB_32" , STDAtomic2,
+ [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
+def atomic_load_and_32 : SDNode<"ISD::ATOMIC_LOAD_AND_32" , STDAtomic2,
+ [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
+def atomic_load_or_32 : SDNode<"ISD::ATOMIC_LOAD_OR_32" , STDAtomic2,
+ [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
+def atomic_load_xor_32 : SDNode<"ISD::ATOMIC_LOAD_XOR_32" , STDAtomic2,
+ [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
+def atomic_load_nand_32: SDNode<"ISD::ATOMIC_LOAD_NAND_32", STDAtomic2,
+ [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
+def atomic_load_min_32 : SDNode<"ISD::ATOMIC_LOAD_MIN_32", STDAtomic2,
+ [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
+def atomic_load_max_32 : SDNode<"ISD::ATOMIC_LOAD_MAX_32", STDAtomic2,
+ [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
+def atomic_load_umin_32 : SDNode<"ISD::ATOMIC_LOAD_UMIN_32", STDAtomic2,
+ [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
+def atomic_load_umax_32 : SDNode<"ISD::ATOMIC_LOAD_UMAX_32", STDAtomic2,
+ [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
+def atomic_cmp_swap_64 : SDNode<"ISD::ATOMIC_CMP_SWAP_64" , STDAtomic3,
+ [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
+def atomic_load_add_64 : SDNode<"ISD::ATOMIC_LOAD_ADD_64" , STDAtomic2,
+ [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
+def atomic_swap_64 : SDNode<"ISD::ATOMIC_SWAP_64", STDAtomic2,
+ [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
+def atomic_load_sub_64 : SDNode<"ISD::ATOMIC_LOAD_SUB_64" , STDAtomic2,
+ [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
+def atomic_load_and_64 : SDNode<"ISD::ATOMIC_LOAD_AND_64" , STDAtomic2,
+ [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
+def atomic_load_or_64 : SDNode<"ISD::ATOMIC_LOAD_OR_64" , STDAtomic2,
+ [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
+def atomic_load_xor_64 : SDNode<"ISD::ATOMIC_LOAD_XOR_64" , STDAtomic2,
+ [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
+def atomic_load_nand_64: SDNode<"ISD::ATOMIC_LOAD_NAND_64", STDAtomic2,
+ [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
+def atomic_load_min_64 : SDNode<"ISD::ATOMIC_LOAD_MIN_64", STDAtomic2,
+ [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
+def atomic_load_max_64 : SDNode<"ISD::ATOMIC_LOAD_MAX_64", STDAtomic2,
+ [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
+def atomic_load_umin_64 : SDNode<"ISD::ATOMIC_LOAD_UMIN_64", STDAtomic2,
+ [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
+def atomic_load_umax_64 : SDNode<"ISD::ATOMIC_LOAD_UMAX_64", STDAtomic2,
[SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
// Do not use ld, st directly. Use load, extload, sextload, zextload, store,
@@ -724,177 +795,6 @@
ST->isTruncatingStore() && ST->getMemoryVT() == MVT::f32;
}]>;
-// Atomic patterns
-def atomic_cmp_swap_8 : PatFrag<(ops node:$ptr, node:$cmp, node:$swp),
- (atomic_cmp_swap node:$ptr, node:$cmp, node:$swp), [{
- AtomicSDNode* V = cast<AtomicSDNode>(N);
- return V->getValueType(0) == MVT::i8;
-}]>;
-def atomic_cmp_swap_16 : PatFrag<(ops node:$ptr, node:$cmp, node:$swp),
- (atomic_cmp_swap node:$ptr, node:$cmp, node:$swp), [{
- AtomicSDNode* V = cast<AtomicSDNode>(N);
- return V->getValueType(0) == MVT::i16;
-}]>;
-def atomic_cmp_swap_32 : PatFrag<(ops node:$ptr, node:$cmp, node:$swp),
- (atomic_cmp_swap node:$ptr, node:$cmp, node:$swp), [{
- AtomicSDNode* V = cast<AtomicSDNode>(N);
- return V->getValueType(0) == MVT::i32;
-}]>;
-def atomic_cmp_swap_64 : PatFrag<(ops node:$ptr, node:$cmp, node:$swp),
- (atomic_cmp_swap node:$ptr, node:$cmp, node:$swp), [{
- AtomicSDNode* V = cast<AtomicSDNode>(N);
- return V->getValueType(0) == MVT::i64;
-}]>;
-
-def atomic_load_add_8 : PatFrag<(ops node:$ptr, node:$inc),
- (atomic_load_add node:$ptr, node:$inc), [{
- AtomicSDNode* V = cast<AtomicSDNode>(N);
- return V->getValueType(0) == MVT::i8;
-}]>;
-def atomic_load_add_16 : PatFrag<(ops node:$ptr, node:$inc),
- (atomic_load_add node:$ptr, node:$inc), [{
- AtomicSDNode* V = cast<AtomicSDNode>(N);
- return V->getValueType(0) == MVT::i16;
-}]>;
-def atomic_load_add_32 : PatFrag<(ops node:$ptr, node:$inc),
- (atomic_load_add node:$ptr, node:$inc), [{
- AtomicSDNode* V = cast<AtomicSDNode>(N);
- return V->getValueType(0) == MVT::i32;
-}]>;
-def atomic_load_add_64 : PatFrag<(ops node:$ptr, node:$inc),
- (atomic_load_add node:$ptr, node:$inc), [{
- AtomicSDNode* V = cast<AtomicSDNode>(N);
- return V->getValueType(0) == MVT::i64;
-}]>;
-
-def atomic_load_sub_8 : PatFrag<(ops node:$ptr, node:$inc),
- (atomic_load_sub node:$ptr, node:$inc), [{
- AtomicSDNode* V = cast<AtomicSDNode>(N);
- return V->getValueType(0) == MVT::i8;
-}]>;
-def atomic_load_sub_16 : PatFrag<(ops node:$ptr, node:$inc),
- (atomic_load_sub node:$ptr, node:$inc), [{
- AtomicSDNode* V = cast<AtomicSDNode>(N);
- return V->getValueType(0) == MVT::i16;
-}]>;
-def atomic_load_sub_32 : PatFrag<(ops node:$ptr, node:$inc),
- (atomic_load_sub node:$ptr, node:$inc), [{
- AtomicSDNode* V = cast<AtomicSDNode>(N);
- return V->getValueType(0) == MVT::i32;
-}]>;
-def atomic_load_sub_64 : PatFrag<(ops node:$ptr, node:$inc),
- (atomic_load_sub node:$ptr, node:$inc), [{
- AtomicSDNode* V = cast<AtomicSDNode>(N);
- return V->getValueType(0) == MVT::i64;
-}]>;
-
-def atomic_load_and_8 : PatFrag<(ops node:$ptr, node:$inc),
- (atomic_load_and node:$ptr, node:$inc), [{
- AtomicSDNode* V = cast<AtomicSDNode>(N);
- return V->getValueType(0) == MVT::i8;
-}]>;
-def atomic_load_and_16 : PatFrag<(ops node:$ptr, node:$inc),
- (atomic_load_and node:$ptr, node:$inc), [{
- AtomicSDNode* V = cast<AtomicSDNode>(N);
- return V->getValueType(0) == MVT::i16;
-}]>;
-def atomic_load_and_32 : PatFrag<(ops node:$ptr, node:$inc),
- (atomic_load_and node:$ptr, node:$inc), [{
- AtomicSDNode* V = cast<AtomicSDNode>(N);
- return V->getValueType(0) == MVT::i32;
-}]>;
-def atomic_load_and_64 : PatFrag<(ops node:$ptr, node:$inc),
- (atomic_load_and node:$ptr, node:$inc), [{
- AtomicSDNode* V = cast<AtomicSDNode>(N);
- return V->getValueType(0) == MVT::i64;
-}]>;
-
-def atomic_load_or_8 : PatFrag<(ops node:$ptr, node:$inc),
- (atomic_load_or node:$ptr, node:$inc), [{
- AtomicSDNode* V = cast<AtomicSDNode>(N);
- return V->getValueType(0) == MVT::i8;
-}]>;
-def atomic_load_or_16 : PatFrag<(ops node:$ptr, node:$inc),
- (atomic_load_or node:$ptr, node:$inc), [{
- AtomicSDNode* V = cast<AtomicSDNode>(N);
- return V->getValueType(0) == MVT::i16;
-}]>;
-def atomic_load_or_32 : PatFrag<(ops node:$ptr, node:$inc),
- (atomic_load_or node:$ptr, node:$inc), [{
- AtomicSDNode* V = cast<AtomicSDNode>(N);
- return V->getValueType(0) == MVT::i32;
-}]>;
-def atomic_load_or_64 : PatFrag<(ops node:$ptr, node:$inc),
- (atomic_load_or node:$ptr, node:$inc), [{
- AtomicSDNode* V = cast<AtomicSDNode>(N);
- return V->getValueType(0) == MVT::i64;
-}]>;
-
-def atomic_load_xor_8 : PatFrag<(ops node:$ptr, node:$inc),
- (atomic_load_xor node:$ptr, node:$inc), [{
- AtomicSDNode* V = cast<AtomicSDNode>(N);
- return V->getValueType(0) == MVT::i8;
-}]>;
-def atomic_load_xor_16 : PatFrag<(ops node:$ptr, node:$inc),
- (atomic_load_xor node:$ptr, node:$inc), [{
- AtomicSDNode* V = cast<AtomicSDNode>(N);
- return V->getValueType(0) == MVT::i16;
-}]>;
-def atomic_load_xor_32 : PatFrag<(ops node:$ptr, node:$inc),
- (atomic_load_xor node:$ptr, node:$inc), [{
- AtomicSDNode* V = cast<AtomicSDNode>(N);
- return V->getValueType(0) == MVT::i32;
-}]>;
-def atomic_load_xor_64 : PatFrag<(ops node:$ptr, node:$inc),
- (atomic_load_xor node:$ptr, node:$inc), [{
- AtomicSDNode* V = cast<AtomicSDNode>(N);
- return V->getValueType(0) == MVT::i64;
-}]>;
-
-def atomic_load_nand_8 : PatFrag<(ops node:$ptr, node:$inc),
- (atomic_load_nand node:$ptr, node:$inc), [{
- AtomicSDNode* V = cast<AtomicSDNode>(N);
- return V->getValueType(0) == MVT::i8;
-}]>;
-def atomic_load_nand_16 : PatFrag<(ops node:$ptr, node:$inc),
- (atomic_load_nand node:$ptr, node:$inc), [{
- AtomicSDNode* V = cast<AtomicSDNode>(N);
- return V->getValueType(0) == MVT::i16;
-}]>;
-def atomic_load_nand_32 : PatFrag<(ops node:$ptr, node:$inc),
- (atomic_load_nand node:$ptr, node:$inc), [{
- AtomicSDNode* V = cast<AtomicSDNode>(N);
- return V->getValueType(0) == MVT::i32;
-}]>;
-def atomic_load_nand_64 : PatFrag<(ops node:$ptr, node:$inc),
- (atomic_load_nand node:$ptr, node:$inc), [{
- AtomicSDNode* V = cast<AtomicSDNode>(N);
- return V->getValueType(0) == MVT::i64;
-}]>;
-
-def atomic_swap_8 : PatFrag<(ops node:$ptr, node:$inc),
- (atomic_swap node:$ptr, node:$inc), [{
- AtomicSDNode* V = cast<AtomicSDNode>(N);
- return V->getValueType(0) == MVT::i8;
-}]>;
-def atomic_swap_16 : PatFrag<(ops node:$ptr, node:$inc),
- (atomic_swap node:$ptr, node:$inc), [{
- AtomicSDNode* V = cast<AtomicSDNode>(N);
- return V->getValueType(0) == MVT::i16;
-}]>;
-def atomic_swap_32 : PatFrag<(ops node:$ptr, node:$inc),
- (atomic_swap node:$ptr, node:$inc), [{
- AtomicSDNode* V = cast<AtomicSDNode>(N);
- return V->getValueType(0) == MVT::i32;
-}]>;
-def atomic_swap_64 : PatFrag<(ops node:$ptr, node:$inc),
- (atomic_swap node:$ptr, node:$inc), [{
- AtomicSDNode* V = cast<AtomicSDNode>(N);
- return V->getValueType(0) == MVT::i64;
-}]>;
-
-
-
// setcc convenience fragments.
def setoeq : PatFrag<(ops node:$lhs, node:$rhs),
(setcc node:$lhs, node:$rhs, SETOEQ)>;
diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp
index 9cc1bed..3bc5592 100644
--- a/lib/Target/X86/X86ISelLowering.cpp
+++ b/lib/Target/X86/X86ISelLowering.cpp
@@ -293,15 +293,15 @@
setOperationAction(ISD::MEMBARRIER , MVT::Other, Expand);
// Expand certain atomics
- setOperationAction(ISD::ATOMIC_CMP_SWAP , MVT::i8, Custom);
- setOperationAction(ISD::ATOMIC_CMP_SWAP , MVT::i16, Custom);
- setOperationAction(ISD::ATOMIC_CMP_SWAP , MVT::i32, Custom);
- setOperationAction(ISD::ATOMIC_CMP_SWAP , MVT::i64, Custom);
+ setOperationAction(ISD::ATOMIC_CMP_SWAP_8 , MVT::i8, Custom);
+ setOperationAction(ISD::ATOMIC_CMP_SWAP_16, MVT::i16, Custom);
+ setOperationAction(ISD::ATOMIC_CMP_SWAP_32, MVT::i32, Custom);
+ setOperationAction(ISD::ATOMIC_CMP_SWAP_64, MVT::i64, Custom);
- setOperationAction(ISD::ATOMIC_LOAD_SUB , MVT::i8, Expand);
- setOperationAction(ISD::ATOMIC_LOAD_SUB , MVT::i16, Expand);
- setOperationAction(ISD::ATOMIC_LOAD_SUB , MVT::i32, Expand);
- setOperationAction(ISD::ATOMIC_LOAD_SUB , MVT::i64, Expand);
+ setOperationAction(ISD::ATOMIC_LOAD_SUB_8, MVT::i8, Expand);
+ setOperationAction(ISD::ATOMIC_LOAD_SUB_16, MVT::i16, Expand);
+ setOperationAction(ISD::ATOMIC_LOAD_SUB_32, MVT::i32, Expand);
+ setOperationAction(ISD::ATOMIC_LOAD_SUB_64, MVT::i64, Expand);
// Use the default ISD::DBG_STOPPOINT, ISD::DECLARE expansion.
setOperationAction(ISD::DBG_STOPPOINT, MVT::Other, Expand);
@@ -5914,8 +5914,11 @@
MVT T = Op->getValueType(0);
SDValue negOp = DAG.getNode(ISD::SUB, T,
DAG.getConstant(0, T), Op->getOperand(2));
- return DAG.getAtomic(ISD::ATOMIC_LOAD_ADD, Op->getOperand(0),
- Op->getOperand(1), negOp,
+ return DAG.getAtomic((T==MVT::i8 ? ISD::ATOMIC_LOAD_ADD_8:
+ T==MVT::i16 ? ISD::ATOMIC_LOAD_ADD_16:
+ T==MVT::i32 ? ISD::ATOMIC_LOAD_ADD_32:
+ T==MVT::i64 ? ISD::ATOMIC_LOAD_ADD_64: 0),
+ Op->getOperand(0), Op->getOperand(1), negOp,
cast<AtomicSDNode>(Op)->getSrcValue(),
cast<AtomicSDNode>(Op)->getAlignment()).Val;
}
@@ -5925,7 +5928,10 @@
SDValue X86TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) {
switch (Op.getOpcode()) {
default: assert(0 && "Should not custom lower this!");
- case ISD::ATOMIC_CMP_SWAP: return LowerCMP_SWAP(Op,DAG);
+ case ISD::ATOMIC_CMP_SWAP_8: return LowerCMP_SWAP(Op,DAG);
+ case ISD::ATOMIC_CMP_SWAP_16: return LowerCMP_SWAP(Op,DAG);
+ case ISD::ATOMIC_CMP_SWAP_32: return LowerCMP_SWAP(Op,DAG);
+ case ISD::ATOMIC_CMP_SWAP_64: return LowerCMP_SWAP(Op,DAG);
case ISD::BUILD_VECTOR: return LowerBUILD_VECTOR(Op, DAG);
case ISD::VECTOR_SHUFFLE: return LowerVECTOR_SHUFFLE(Op, DAG);
case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG);
@@ -5979,8 +5985,11 @@
default: assert(0 && "Should not custom lower this!");
case ISD::FP_TO_SINT: return ExpandFP_TO_SINT(N, DAG);
case ISD::READCYCLECOUNTER: return ExpandREADCYCLECOUNTER(N, DAG);
- case ISD::ATOMIC_CMP_SWAP: return ExpandATOMIC_CMP_SWAP(N, DAG);
- case ISD::ATOMIC_LOAD_SUB: return ExpandATOMIC_LOAD_SUB(N,DAG);
+ case ISD::ATOMIC_CMP_SWAP_64: return ExpandATOMIC_CMP_SWAP(N, DAG);
+ case ISD::ATOMIC_LOAD_SUB_8: return ExpandATOMIC_LOAD_SUB(N,DAG);
+ case ISD::ATOMIC_LOAD_SUB_16: return ExpandATOMIC_LOAD_SUB(N,DAG);
+ case ISD::ATOMIC_LOAD_SUB_32: return ExpandATOMIC_LOAD_SUB(N,DAG);
+ case ISD::ATOMIC_LOAD_SUB_64: return ExpandATOMIC_LOAD_SUB(N,DAG);
}
}
diff --git a/lib/Target/X86/X86Instr64bit.td b/lib/Target/X86/X86Instr64bit.td
index 93cd0c4..239ae97 100644
--- a/lib/Target/X86/X86Instr64bit.td
+++ b/lib/Target/X86/X86Instr64bit.td
@@ -1153,28 +1153,28 @@
usesCustomDAGSchedInserter = 1 in {
def ATOMAND64 : I<0, Pseudo, (outs GR64:$dst),(ins i64mem:$ptr, GR64:$val),
"#ATOMAND64 PSUEDO!",
- [(set GR64:$dst, (atomic_load_and addr:$ptr, GR64:$val))]>;
+ [(set GR64:$dst, (atomic_load_and_64 addr:$ptr, GR64:$val))]>;
def ATOMOR64 : I<0, Pseudo, (outs GR64:$dst),(ins i64mem:$ptr, GR64:$val),
"#ATOMOR64 PSUEDO!",
- [(set GR64:$dst, (atomic_load_or addr:$ptr, GR64:$val))]>;
+ [(set GR64:$dst, (atomic_load_or_64 addr:$ptr, GR64:$val))]>;
def ATOMXOR64 : I<0, Pseudo,(outs GR64:$dst),(ins i64mem:$ptr, GR64:$val),
"#ATOMXOR64 PSUEDO!",
- [(set GR64:$dst, (atomic_load_xor addr:$ptr, GR64:$val))]>;
+ [(set GR64:$dst, (atomic_load_xor_64 addr:$ptr, GR64:$val))]>;
def ATOMNAND64 : I<0, Pseudo,(outs GR64:$dst),(ins i64mem:$ptr, GR64:$val),
"#ATOMNAND64 PSUEDO!",
- [(set GR64:$dst, (atomic_load_nand addr:$ptr, GR64:$val))]>;
+ [(set GR64:$dst, (atomic_load_nand_64 addr:$ptr, GR64:$val))]>;
def ATOMMIN64: I<0, Pseudo, (outs GR64:$dst), (ins i64mem:$ptr, GR64:$val),
"#ATOMMIN64 PSUEDO!",
- [(set GR64:$dst, (atomic_load_min addr:$ptr, GR64:$val))]>;
+ [(set GR64:$dst, (atomic_load_min_64 addr:$ptr, GR64:$val))]>;
def ATOMMAX64: I<0, Pseudo, (outs GR64:$dst),(ins i64mem:$ptr, GR64:$val),
"#ATOMMAX64 PSUEDO!",
- [(set GR64:$dst, (atomic_load_max addr:$ptr, GR64:$val))]>;
+ [(set GR64:$dst, (atomic_load_max_64 addr:$ptr, GR64:$val))]>;
def ATOMUMIN64: I<0, Pseudo, (outs GR64:$dst),(ins i64mem:$ptr, GR64:$val),
"#ATOMUMIN64 PSUEDO!",
- [(set GR64:$dst, (atomic_load_umin addr:$ptr, GR64:$val))]>;
+ [(set GR64:$dst, (atomic_load_umin_64 addr:$ptr, GR64:$val))]>;
def ATOMUMAX64: I<0, Pseudo, (outs GR64:$dst),(ins i64mem:$ptr, GR64:$val),
"#ATOMUMAX64 PSUEDO!",
- [(set GR64:$dst, (atomic_load_umax addr:$ptr, GR64:$val))]>;
+ [(set GR64:$dst, (atomic_load_umax_64 addr:$ptr, GR64:$val))]>;
}
//===----------------------------------------------------------------------===//
diff --git a/lib/Target/X86/X86InstrInfo.td b/lib/Target/X86/X86InstrInfo.td
index e55edce..ee84fc1 100644
--- a/lib/Target/X86/X86InstrInfo.td
+++ b/lib/Target/X86/X86InstrInfo.td
@@ -2637,66 +2637,66 @@
usesCustomDAGSchedInserter = 1 in {
def ATOMAND32 : I<0, Pseudo, (outs GR32:$dst),(ins i32mem:$ptr, GR32:$val),
"#ATOMAND32 PSUEDO!",
- [(set GR32:$dst, (atomic_load_and addr:$ptr, GR32:$val))]>;
+ [(set GR32:$dst, (atomic_load_and_32 addr:$ptr, GR32:$val))]>;
def ATOMOR32 : I<0, Pseudo, (outs GR32:$dst),(ins i32mem:$ptr, GR32:$val),
"#ATOMOR32 PSUEDO!",
- [(set GR32:$dst, (atomic_load_or addr:$ptr, GR32:$val))]>;
+ [(set GR32:$dst, (atomic_load_or_32 addr:$ptr, GR32:$val))]>;
def ATOMXOR32 : I<0, Pseudo,(outs GR32:$dst),(ins i32mem:$ptr, GR32:$val),
"#ATOMXOR32 PSUEDO!",
- [(set GR32:$dst, (atomic_load_xor addr:$ptr, GR32:$val))]>;
+ [(set GR32:$dst, (atomic_load_xor_32 addr:$ptr, GR32:$val))]>;
def ATOMNAND32 : I<0, Pseudo,(outs GR32:$dst),(ins i32mem:$ptr, GR32:$val),
"#ATOMNAND32 PSUEDO!",
- [(set GR32:$dst, (atomic_load_nand addr:$ptr, GR32:$val))]>;
+ [(set GR32:$dst, (atomic_load_nand_32 addr:$ptr, GR32:$val))]>;
def ATOMMIN32: I<0, Pseudo, (outs GR32:$dst), (ins i32mem:$ptr, GR32:$val),
"#ATOMMIN32 PSUEDO!",
- [(set GR32:$dst, (atomic_load_min addr:$ptr, GR32:$val))]>;
+ [(set GR32:$dst, (atomic_load_min_32 addr:$ptr, GR32:$val))]>;
def ATOMMAX32: I<0, Pseudo, (outs GR32:$dst),(ins i32mem:$ptr, GR32:$val),
"#ATOMMAX32 PSUEDO!",
- [(set GR32:$dst, (atomic_load_max addr:$ptr, GR32:$val))]>;
+ [(set GR32:$dst, (atomic_load_max_32 addr:$ptr, GR32:$val))]>;
def ATOMUMIN32: I<0, Pseudo, (outs GR32:$dst),(ins i32mem:$ptr, GR32:$val),
"#ATOMUMIN32 PSUEDO!",
- [(set GR32:$dst, (atomic_load_umin addr:$ptr, GR32:$val))]>;
+ [(set GR32:$dst, (atomic_load_umin_32 addr:$ptr, GR32:$val))]>;
def ATOMUMAX32: I<0, Pseudo, (outs GR32:$dst),(ins i32mem:$ptr, GR32:$val),
"#ATOMUMAX32 PSUEDO!",
- [(set GR32:$dst, (atomic_load_umax addr:$ptr, GR32:$val))]>;
+ [(set GR32:$dst, (atomic_load_umax_32 addr:$ptr, GR32:$val))]>;
def ATOMAND16 : I<0, Pseudo, (outs GR16:$dst),(ins i16mem:$ptr, GR16:$val),
"#ATOMAND16 PSUEDO!",
- [(set GR16:$dst, (atomic_load_and addr:$ptr, GR16:$val))]>;
+ [(set GR16:$dst, (atomic_load_and_16 addr:$ptr, GR16:$val))]>;
def ATOMOR16 : I<0, Pseudo, (outs GR16:$dst),(ins i16mem:$ptr, GR16:$val),
"#ATOMOR16 PSUEDO!",
- [(set GR16:$dst, (atomic_load_or addr:$ptr, GR16:$val))]>;
+ [(set GR16:$dst, (atomic_load_or_16 addr:$ptr, GR16:$val))]>;
def ATOMXOR16 : I<0, Pseudo,(outs GR16:$dst),(ins i16mem:$ptr, GR16:$val),
"#ATOMXOR16 PSUEDO!",
- [(set GR16:$dst, (atomic_load_xor addr:$ptr, GR16:$val))]>;
+ [(set GR16:$dst, (atomic_load_xor_16 addr:$ptr, GR16:$val))]>;
def ATOMNAND16 : I<0, Pseudo,(outs GR16:$dst),(ins i16mem:$ptr, GR16:$val),
"#ATOMNAND16 PSUEDO!",
- [(set GR16:$dst, (atomic_load_nand addr:$ptr, GR16:$val))]>;
+ [(set GR16:$dst, (atomic_load_nand_16 addr:$ptr, GR16:$val))]>;
def ATOMMIN16: I<0, Pseudo, (outs GR16:$dst), (ins i16mem:$ptr, GR16:$val),
"#ATOMMIN16 PSUEDO!",
- [(set GR16:$dst, (atomic_load_min addr:$ptr, GR16:$val))]>;
+ [(set GR16:$dst, (atomic_load_min_16 addr:$ptr, GR16:$val))]>;
def ATOMMAX16: I<0, Pseudo, (outs GR16:$dst),(ins i16mem:$ptr, GR16:$val),
"#ATOMMAX16 PSUEDO!",
- [(set GR16:$dst, (atomic_load_max addr:$ptr, GR16:$val))]>;
+ [(set GR16:$dst, (atomic_load_max_16 addr:$ptr, GR16:$val))]>;
def ATOMUMIN16: I<0, Pseudo, (outs GR16:$dst),(ins i16mem:$ptr, GR16:$val),
"#ATOMUMIN16 PSUEDO!",
- [(set GR16:$dst, (atomic_load_umin addr:$ptr, GR16:$val))]>;
+ [(set GR16:$dst, (atomic_load_umin_16 addr:$ptr, GR16:$val))]>;
def ATOMUMAX16: I<0, Pseudo, (outs GR16:$dst),(ins i16mem:$ptr, GR16:$val),
"#ATOMUMAX16 PSUEDO!",
- [(set GR16:$dst, (atomic_load_umax addr:$ptr, GR16:$val))]>;
+ [(set GR16:$dst, (atomic_load_umax_16 addr:$ptr, GR16:$val))]>;
def ATOMAND8 : I<0, Pseudo, (outs GR8:$dst),(ins i8mem:$ptr, GR8:$val),
"#ATOMAND8 PSUEDO!",
- [(set GR8:$dst, (atomic_load_and addr:$ptr, GR8:$val))]>;
+ [(set GR8:$dst, (atomic_load_and_8 addr:$ptr, GR8:$val))]>;
def ATOMOR8 : I<0, Pseudo, (outs GR8:$dst),(ins i8mem:$ptr, GR8:$val),
"#ATOMOR8 PSUEDO!",
- [(set GR8:$dst, (atomic_load_or addr:$ptr, GR8:$val))]>;
+ [(set GR8:$dst, (atomic_load_or_8 addr:$ptr, GR8:$val))]>;
def ATOMXOR8 : I<0, Pseudo,(outs GR8:$dst),(ins i8mem:$ptr, GR8:$val),
"#ATOMXOR8 PSUEDO!",
- [(set GR8:$dst, (atomic_load_xor addr:$ptr, GR8:$val))]>;
+ [(set GR8:$dst, (atomic_load_xor_8 addr:$ptr, GR8:$val))]>;
def ATOMNAND8 : I<0, Pseudo,(outs GR8:$dst),(ins i8mem:$ptr, GR8:$val),
"#ATOMNAND8 PSUEDO!",
- [(set GR8:$dst, (atomic_load_nand addr:$ptr, GR8:$val))]>;
+ [(set GR8:$dst, (atomic_load_nand_8 addr:$ptr, GR8:$val))]>;
}
//===----------------------------------------------------------------------===//