Added MemOperands to atomic operations, since atomics touch memory.
Added an abstract class, MemSDNode, for any node that has an associated
MemOperand.
Renamed atomic.lcs => atomic.cmp.swap, atomic.las => atomic.load.add, and
atomic.lss => atomic.load.sub.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@52706 91177308-0d34-0410-b5e6-96231b3b80d8
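The MemSDNode class itself is not shown in the hunks below, only its users
are. As a minimal sketch of the idea (the getSrcValue/getAlignment accessor
names match the calls added in the X86 ExpandATOMIC_LOAD_SUB hunk; the rest
is simplified and hypothetical, not the exact LLVM declaration), the class
hangs the memory-reference information off any node that touches memory:

    // Hypothetical, simplified sketch of the MemSDNode idea; the real
    // class would derive from SDNode and carry its opcode and value types.
    #include <cassert>

    struct Value;               // stand-in for llvm::Value (the IR object)

    class MemSDNode /* : public SDNode */ {
      const Value *SrcValue;    // IR object the access refers to; may be null
      unsigned Alignment;       // alignment of the access, in bytes
    public:
      MemSDNode(const Value *SV, unsigned Align)
          : SrcValue(SV), Alignment(Align) {
        assert((Align & (Align - 1)) == 0 && "alignment must be a power of 2");
      }
      // Enough to build a MachineMemOperand describing the access, so
      // loads, stores, and now atomics can all report what they touch.
      const Value *getSrcValue() const { return SrcValue; }
      unsigned getAlignment() const { return Alignment; }
    };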
diff --git a/lib/Target/Alpha/AlphaInstrInfo.td b/lib/Target/Alpha/AlphaInstrInfo.td
index ddefe85..42bd8ed 100644
--- a/lib/Target/Alpha/AlphaInstrInfo.td
+++ b/lib/Target/Alpha/AlphaInstrInfo.td
@@ -160,14 +160,14 @@
let usesCustomDAGSchedInserter = 1 in { // Expanded by the scheduler.
def CAS32 : PseudoInstAlpha<(outs GPRC:$dst), (ins GPRC:$ptr, GPRC:$cmp, GPRC:$swp), "",
- [(set GPRC:$dst, (atomic_lcs_32 GPRC:$ptr, GPRC:$cmp, GPRC:$swp))], s_pseudo>;
+ [(set GPRC:$dst, (atomic_cmp_swap_32 GPRC:$ptr, GPRC:$cmp, GPRC:$swp))], s_pseudo>;
def CAS64 : PseudoInstAlpha<(outs GPRC:$dst), (ins GPRC:$ptr, GPRC:$cmp, GPRC:$swp), "",
- [(set GPRC:$dst, (atomic_lcs_64 GPRC:$ptr, GPRC:$cmp, GPRC:$swp))], s_pseudo>;
+ [(set GPRC:$dst, (atomic_cmp_swap_64 GPRC:$ptr, GPRC:$cmp, GPRC:$swp))], s_pseudo>;
def LAS32 : PseudoInstAlpha<(outs GPRC:$dst), (ins GPRC:$ptr, GPRC:$swp), "",
- [(set GPRC:$dst, (atomic_las_32 GPRC:$ptr, GPRC:$swp))], s_pseudo>;
+ [(set GPRC:$dst, (atomic_load_add_32 GPRC:$ptr, GPRC:$swp))], s_pseudo>;
def LAS64 :PseudoInstAlpha<(outs GPRC:$dst), (ins GPRC:$ptr, GPRC:$swp), "",
- [(set GPRC:$dst, (atomic_las_64 GPRC:$ptr, GPRC:$swp))], s_pseudo>;
+ [(set GPRC:$dst, (atomic_load_add_64 GPRC:$ptr, GPRC:$swp))], s_pseudo>;
def SWAP32 : PseudoInstAlpha<(outs GPRC:$dst), (ins GPRC:$ptr, GPRC:$swp), "",
[(set GPRC:$dst, (atomic_swap_32 GPRC:$ptr, GPRC:$swp))], s_pseudo>;
diff --git a/lib/Target/PowerPC/PPCISelLowering.cpp b/lib/Target/PowerPC/PPCISelLowering.cpp
index d4f8241..9432a74 100644
--- a/lib/Target/PowerPC/PPCISelLowering.cpp
+++ b/lib/Target/PowerPC/PPCISelLowering.cpp
@@ -204,12 +204,12 @@
setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32 , Custom);
setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64 , Custom);
- setOperationAction(ISD::ATOMIC_LAS , MVT::i32 , Custom);
- setOperationAction(ISD::ATOMIC_LCS , MVT::i32 , Custom);
+ setOperationAction(ISD::ATOMIC_LOAD_ADD , MVT::i32 , Custom);
+ setOperationAction(ISD::ATOMIC_CMP_SWAP , MVT::i32 , Custom);
setOperationAction(ISD::ATOMIC_SWAP , MVT::i32 , Custom);
if (TM.getSubtarget<PPCSubtarget>().has64BitSupport()) {
- setOperationAction(ISD::ATOMIC_LAS , MVT::i64 , Custom);
- setOperationAction(ISD::ATOMIC_LCS , MVT::i64 , Custom);
+ setOperationAction(ISD::ATOMIC_LOAD_ADD , MVT::i64 , Custom);
+ setOperationAction(ISD::ATOMIC_CMP_SWAP , MVT::i64 , Custom);
setOperationAction(ISD::ATOMIC_SWAP , MVT::i64 , Custom);
}
@@ -2721,7 +2721,7 @@
return DAG.getNode(PPCISD::DYNALLOC, VTs, Ops, 3);
}
-SDOperand PPCTargetLowering::LowerAtomicLAS(SDOperand Op, SelectionDAG &DAG) {
+SDOperand PPCTargetLowering::LowerAtomicLOAD_ADD(SDOperand Op, SelectionDAG &DAG) {
MVT VT = Op.Val->getValueType(0);
SDOperand Chain = Op.getOperand(0);
SDOperand Ptr = Op.getOperand(1);
@@ -2757,7 +2757,7 @@
OutOps, 2);
}
-SDOperand PPCTargetLowering::LowerAtomicLCS(SDOperand Op, SelectionDAG &DAG) {
+SDOperand PPCTargetLowering::LowerAtomicCMP_SWAP(SDOperand Op, SelectionDAG &DAG) {
MVT VT = Op.Val->getValueType(0);
SDOperand Chain = Op.getOperand(0);
SDOperand Ptr = Op.getOperand(1);
@@ -3942,8 +3942,8 @@
case ISD::DYNAMIC_STACKALLOC:
return LowerDYNAMIC_STACKALLOC(Op, DAG, PPCSubTarget);
- case ISD::ATOMIC_LAS: return LowerAtomicLAS(Op, DAG);
- case ISD::ATOMIC_LCS: return LowerAtomicLCS(Op, DAG);
+ case ISD::ATOMIC_LOAD_ADD: return LowerAtomicLOAD_ADD(Op, DAG);
+ case ISD::ATOMIC_CMP_SWAP: return LowerAtomicCMP_SWAP(Op, DAG);
case ISD::ATOMIC_SWAP: return LowerAtomicSWAP(Op, DAG);
case ISD::SELECT_CC: return LowerSELECT_CC(Op, DAG);
diff --git a/lib/Target/PowerPC/PPCISelLowering.h b/lib/Target/PowerPC/PPCISelLowering.h
index 34012ff..e3ec7b0 100644
--- a/lib/Target/PowerPC/PPCISelLowering.h
+++ b/lib/Target/PowerPC/PPCISelLowering.h
@@ -366,8 +366,8 @@
SDOperand LowerDYNAMIC_STACKALLOC(SDOperand Op, SelectionDAG &DAG,
const PPCSubtarget &Subtarget);
SDOperand LowerSELECT_CC(SDOperand Op, SelectionDAG &DAG);
- SDOperand LowerAtomicLAS(SDOperand Op, SelectionDAG &DAG);
- SDOperand LowerAtomicLCS(SDOperand Op, SelectionDAG &DAG);
+ SDOperand LowerAtomicLOAD_ADD(SDOperand Op, SelectionDAG &DAG);
+ SDOperand LowerAtomicCMP_SWAP(SDOperand Op, SelectionDAG &DAG);
SDOperand LowerAtomicSWAP(SDOperand Op, SelectionDAG &DAG);
SDOperand LowerFP_TO_SINT(SDOperand Op, SelectionDAG &DAG);
SDOperand LowerSINT_TO_FP(SDOperand Op, SelectionDAG &DAG);
diff --git a/lib/Target/TargetSelectionDAG.td b/lib/Target/TargetSelectionDAG.td
index 38a6046..c30933d 100644
--- a/lib/Target/TargetSelectionDAG.td
+++ b/lib/Target/TargetSelectionDAG.td
@@ -220,6 +220,7 @@
def SDNPMayStore : SDNodeProperty; // May write to memory, sets 'mayStore'.
def SDNPMayLoad : SDNodeProperty; // May read memory, sets 'mayLoad'.
def SDNPSideEffect : SDNodeProperty; // Sets 'HasUnmodelledSideEffects'.
+def SDNPMemOperand : SDNodeProperty; // Touches memory, has assoc MemOperand
//===----------------------------------------------------------------------===//
// Selection DAG Node definitions.
@@ -353,39 +354,39 @@
[SDNPHasChain, SDNPSideEffect]>;
// Do not use atomic_* directly, use atomic_*_size (see below)
-def atomic_lcs : SDNode<"ISD::ATOMIC_LCS" , STDAtomic3,
- [SDNPHasChain, SDNPMayStore, SDNPMayLoad]>;
-def atomic_las : SDNode<"ISD::ATOMIC_LAS" , STDAtomic2,
- [SDNPHasChain, SDNPMayStore, SDNPMayLoad]>;
-def atomic_swap : SDNode<"ISD::ATOMIC_SWAP", STDAtomic2,
- [SDNPHasChain, SDNPMayStore, SDNPMayLoad]>;
-def atomic_lss : SDNode<"ISD::ATOMIC_LSS" , STDAtomic2,
- [SDNPHasChain, SDNPMayStore, SDNPMayLoad]>;
+def atomic_cmp_swap : SDNode<"ISD::ATOMIC_CMP_SWAP" , STDAtomic3,
+ [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
+def atomic_load_add : SDNode<"ISD::ATOMIC_LOAD_ADD" , STDAtomic2,
+ [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
+def atomic_swap : SDNode<"ISD::ATOMIC_SWAP", STDAtomic2,
+ [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
+def atomic_load_sub : SDNode<"ISD::ATOMIC_LOAD_SUB" , STDAtomic2,
+ [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
def atomic_load_and : SDNode<"ISD::ATOMIC_LOAD_AND" , STDAtomic2,
- [SDNPHasChain, SDNPMayStore, SDNPMayLoad]>;
+ [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
def atomic_load_or : SDNode<"ISD::ATOMIC_LOAD_OR" , STDAtomic2,
- [SDNPHasChain, SDNPMayStore, SDNPMayLoad]>;
+ [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
def atomic_load_xor : SDNode<"ISD::ATOMIC_LOAD_XOR" , STDAtomic2,
- [SDNPHasChain, SDNPMayStore, SDNPMayLoad]>;
+ [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
def atomic_load_nand: SDNode<"ISD::ATOMIC_LOAD_NAND", STDAtomic2,
- [SDNPHasChain, SDNPMayStore, SDNPMayLoad]>;
+ [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
def atomic_load_min : SDNode<"ISD::ATOMIC_LOAD_MIN", STDAtomic2,
- [SDNPHasChain, SDNPMayStore, SDNPMayLoad]>;
+ [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
def atomic_load_max : SDNode<"ISD::ATOMIC_LOAD_MAX", STDAtomic2,
- [SDNPHasChain, SDNPMayStore, SDNPMayLoad]>;
+ [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
def atomic_load_umin : SDNode<"ISD::ATOMIC_LOAD_UMIN", STDAtomic2,
- [SDNPHasChain, SDNPMayStore, SDNPMayLoad]>;
+ [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
def atomic_load_umax : SDNode<"ISD::ATOMIC_LOAD_UMAX", STDAtomic2,
- [SDNPHasChain, SDNPMayStore, SDNPMayLoad]>;
+ [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
// Do not use ld, st directly. Use load, extload, sextload, zextload, store,
// and truncst (see below).
def ld : SDNode<"ISD::LOAD" , SDTLoad,
- [SDNPHasChain, SDNPMayLoad]>;
+ [SDNPHasChain, SDNPMayLoad, SDNPMemOperand]>;
def st : SDNode<"ISD::STORE" , SDTStore,
- [SDNPHasChain, SDNPMayStore]>;
+ [SDNPHasChain, SDNPMayStore, SDNPMemOperand]>;
def ist : SDNode<"ISD::STORE" , SDTIStore,
- [SDNPHasChain, SDNPMayStore]>;
+ [SDNPHasChain, SDNPMayStore, SDNPMemOperand]>;
def vector_shuffle : SDNode<"ISD::VECTOR_SHUFFLE", SDTVecShuffle, []>;
def build_vector : SDNode<"ISD::BUILD_VECTOR", SDTypeProfile<1, 0, []>, []>;
@@ -764,51 +765,51 @@
}]>;
//Atomic patterns
-def atomic_lcs_8 : PatFrag<(ops node:$ptr, node:$cmp, node:$swp),
- (atomic_lcs node:$ptr, node:$cmp, node:$swp), [{
+def atomic_cmp_swap_8 : PatFrag<(ops node:$ptr, node:$cmp, node:$swp),
+ (atomic_cmp_swap node:$ptr, node:$cmp, node:$swp), [{
if (AtomicSDNode* V = dyn_cast<AtomicSDNode>(N))
return V->getVT() == MVT::i8;
return false;
}]>;
-def atomic_lcs_16 : PatFrag<(ops node:$ptr, node:$cmp, node:$swp),
- (atomic_lcs node:$ptr, node:$cmp, node:$swp), [{
+def atomic_cmp_swap_16 : PatFrag<(ops node:$ptr, node:$cmp, node:$swp),
+ (atomic_cmp_swap node:$ptr, node:$cmp, node:$swp), [{
if (AtomicSDNode* V = dyn_cast<AtomicSDNode>(N))
return V->getVT() == MVT::i16;
return false;
}]>;
-def atomic_lcs_32 : PatFrag<(ops node:$ptr, node:$cmp, node:$swp),
- (atomic_lcs node:$ptr, node:$cmp, node:$swp), [{
+def atomic_cmp_swap_32 : PatFrag<(ops node:$ptr, node:$cmp, node:$swp),
+ (atomic_cmp_swap node:$ptr, node:$cmp, node:$swp), [{
if (AtomicSDNode* V = dyn_cast<AtomicSDNode>(N))
return V->getVT() == MVT::i32;
return false;
}]>;
-def atomic_lcs_64 : PatFrag<(ops node:$ptr, node:$cmp, node:$swp),
- (atomic_lcs node:$ptr, node:$cmp, node:$swp), [{
+def atomic_cmp_swap_64 : PatFrag<(ops node:$ptr, node:$cmp, node:$swp),
+ (atomic_cmp_swap node:$ptr, node:$cmp, node:$swp), [{
if (AtomicSDNode* V = dyn_cast<AtomicSDNode>(N))
return V->getVT() == MVT::i64;
return false;
}]>;
-def atomic_las_8 : PatFrag<(ops node:$ptr, node:$inc),
- (atomic_las node:$ptr, node:$inc), [{
+def atomic_load_add_8 : PatFrag<(ops node:$ptr, node:$inc),
+ (atomic_load_add node:$ptr, node:$inc), [{
if (AtomicSDNode* V = dyn_cast<AtomicSDNode>(N))
return V->getVT() == MVT::i8;
return false;
}]>;
-def atomic_las_16 : PatFrag<(ops node:$ptr, node:$inc),
- (atomic_las node:$ptr, node:$inc), [{
+def atomic_load_add_16 : PatFrag<(ops node:$ptr, node:$inc),
+ (atomic_load_add node:$ptr, node:$inc), [{
if (AtomicSDNode* V = dyn_cast<AtomicSDNode>(N))
return V->getVT() == MVT::i16;
return false;
}]>;
-def atomic_las_32 : PatFrag<(ops node:$ptr, node:$inc),
- (atomic_las node:$ptr, node:$inc), [{
+def atomic_load_add_32 : PatFrag<(ops node:$ptr, node:$inc),
+ (atomic_load_add node:$ptr, node:$inc), [{
if (AtomicSDNode* V = dyn_cast<AtomicSDNode>(N))
return V->getVT() == MVT::i32;
return false;
}]>;
-def atomic_las_64 : PatFrag<(ops node:$ptr, node:$inc),
- (atomic_las node:$ptr, node:$inc), [{
+def atomic_load_add_64 : PatFrag<(ops node:$ptr, node:$inc),
+ (atomic_load_add node:$ptr, node:$inc), [{
if (AtomicSDNode* V = dyn_cast<AtomicSDNode>(N))
return V->getVT() == MVT::i64;
return false;
diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp
index 93df72a..ae7f6e7 100644
--- a/lib/Target/X86/X86ISelLowering.cpp
+++ b/lib/Target/X86/X86ISelLowering.cpp
@@ -292,11 +292,11 @@
setOperationAction(ISD::MEMBARRIER , MVT::Other, Expand);
// Expand certain atomics
- setOperationAction(ISD::ATOMIC_LCS , MVT::i8, Custom);
- setOperationAction(ISD::ATOMIC_LCS , MVT::i16, Custom);
- setOperationAction(ISD::ATOMIC_LCS , MVT::i32, Custom);
- setOperationAction(ISD::ATOMIC_LCS , MVT::i64, Custom);
- setOperationAction(ISD::ATOMIC_LSS , MVT::i32, Expand);
+ setOperationAction(ISD::ATOMIC_CMP_SWAP , MVT::i8, Custom);
+ setOperationAction(ISD::ATOMIC_CMP_SWAP , MVT::i16, Custom);
+ setOperationAction(ISD::ATOMIC_CMP_SWAP , MVT::i32, Custom);
+ setOperationAction(ISD::ATOMIC_CMP_SWAP , MVT::i64, Custom);
+ setOperationAction(ISD::ATOMIC_LOAD_SUB , MVT::i32, Expand);
// Use the default ISD::LOCATION, ISD::DECLARE expansion.
setOperationAction(ISD::LOCATION, MVT::Other, Expand);
@@ -5655,7 +5655,7 @@
return Op;
}
-SDOperand X86TargetLowering::LowerLCS(SDOperand Op, SelectionDAG &DAG) {
+SDOperand X86TargetLowering::LowerCMP_SWAP(SDOperand Op, SelectionDAG &DAG) {
MVT T = cast<AtomicSDNode>(Op.Val)->getVT();
unsigned Reg = 0;
unsigned size = 0;
@@ -5669,7 +5669,7 @@
if (Subtarget->is64Bit()) {
Reg = X86::RAX; size = 8;
} else //Should go away when LowerType stuff lands
- return SDOperand(ExpandATOMIC_LCS(Op.Val, DAG), 0);
+ return SDOperand(ExpandATOMIC_CMP_SWAP(Op.Val, DAG), 0);
break;
};
SDOperand cpIn = DAG.getCopyToReg(Op.getOperand(0), Reg,
@@ -5686,9 +5686,9 @@
return cpOut;
}
-SDNode* X86TargetLowering::ExpandATOMIC_LCS(SDNode* Op, SelectionDAG &DAG) {
+SDNode* X86TargetLowering::ExpandATOMIC_CMP_SWAP(SDNode* Op, SelectionDAG &DAG) {
MVT T = cast<AtomicSDNode>(Op)->getVT();
- assert (T == MVT::i64 && "Only know how to expand i64 CAS");
+ assert (T == MVT::i64 && "Only know how to expand i64 Cmp and Swap");
SDOperand cpInL, cpInH;
cpInL = DAG.getNode(ISD::EXTRACT_ELEMENT, MVT::i32, Op->getOperand(3),
DAG.getConstant(0, MVT::i32));
@@ -5722,13 +5722,15 @@
return DAG.getNode(ISD::MERGE_VALUES, Tys, ResultVal, cpOutH.getValue(1)).Val;
}
-SDNode* X86TargetLowering::ExpandATOMIC_LSS(SDNode* Op, SelectionDAG &DAG) {
+SDNode* X86TargetLowering::ExpandATOMIC_LOAD_SUB(SDNode* Op, SelectionDAG &DAG) {
MVT T = cast<AtomicSDNode>(Op)->getVT();
- assert (T == MVT::i32 && "Only know how to expand i32 LSS");
+ assert (T == MVT::i32 && "Only know how to expand i32 Atomic Load Sub");
SDOperand negOp = DAG.getNode(ISD::SUB, T,
DAG.getConstant(0, T), Op->getOperand(2));
- return DAG.getAtomic(ISD::ATOMIC_LAS, Op->getOperand(0),
- Op->getOperand(1), negOp, T).Val;
+ return DAG.getAtomic(ISD::ATOMIC_LOAD_ADD, Op->getOperand(0),
+ Op->getOperand(1), negOp, T,
+ cast<AtomicSDNode>(Op)->getSrcValue(),
+ cast<AtomicSDNode>(Op)->getAlignment()).Val;
}
/// LowerOperation - Provide custom lowering hooks for some operations.
@@ -5736,7 +5738,7 @@
SDOperand X86TargetLowering::LowerOperation(SDOperand Op, SelectionDAG &DAG) {
switch (Op.getOpcode()) {
default: assert(0 && "Should not custom lower this!");
- case ISD::ATOMIC_LCS: return LowerLCS(Op,DAG);
+ case ISD::ATOMIC_CMP_SWAP: return LowerCMP_SWAP(Op,DAG);
case ISD::BUILD_VECTOR: return LowerBUILD_VECTOR(Op, DAG);
case ISD::VECTOR_SHUFFLE: return LowerVECTOR_SHUFFLE(Op, DAG);
case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG);
@@ -5788,8 +5790,8 @@
default: assert(0 && "Should not custom lower this!");
case ISD::FP_TO_SINT: return ExpandFP_TO_SINT(N, DAG);
case ISD::READCYCLECOUNTER: return ExpandREADCYCLECOUNTER(N, DAG);
- case ISD::ATOMIC_LCS: return ExpandATOMIC_LCS(N, DAG);
- case ISD::ATOMIC_LSS: return ExpandATOMIC_LSS(N,DAG);
+ case ISD::ATOMIC_CMP_SWAP: return ExpandATOMIC_CMP_SWAP(N, DAG);
+ case ISD::ATOMIC_LOAD_SUB: return ExpandATOMIC_LOAD_SUB(N,DAG);
}
}
diff --git a/lib/Target/X86/X86ISelLowering.h b/lib/Target/X86/X86ISelLowering.h
index f2b851e..dff4bea 100644
--- a/lib/Target/X86/X86ISelLowering.h
+++ b/lib/Target/X86/X86ISelLowering.h
@@ -541,11 +541,11 @@
SDOperand LowerFLT_ROUNDS_(SDOperand Op, SelectionDAG &DAG);
SDOperand LowerCTLZ(SDOperand Op, SelectionDAG &DAG);
SDOperand LowerCTTZ(SDOperand Op, SelectionDAG &DAG);
- SDOperand LowerLCS(SDOperand Op, SelectionDAG &DAG);
+ SDOperand LowerCMP_SWAP(SDOperand Op, SelectionDAG &DAG);
SDNode *ExpandFP_TO_SINT(SDNode *N, SelectionDAG &DAG);
SDNode *ExpandREADCYCLECOUNTER(SDNode *N, SelectionDAG &DAG);
- SDNode *ExpandATOMIC_LCS(SDNode *N, SelectionDAG &DAG);
- SDNode *ExpandATOMIC_LSS(SDNode *N, SelectionDAG &DAG);
+ SDNode *ExpandATOMIC_CMP_SWAP(SDNode *N, SelectionDAG &DAG);
+ SDNode *ExpandATOMIC_LOAD_SUB(SDNode *N, SelectionDAG &DAG);
SDOperand EmitTargetCodeForMemset(SelectionDAG &DAG,
SDOperand Chain,
diff --git a/lib/Target/X86/X86Instr64bit.td b/lib/Target/X86/X86Instr64bit.td
index 8398e9a..23a4030 100644
--- a/lib/Target/X86/X86Instr64bit.td
+++ b/lib/Target/X86/X86Instr64bit.td
@@ -1124,7 +1124,7 @@
let Constraints = "$val = $dst", Defs = [EFLAGS] in {
def LXADD64 : RI<0xC1, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$ptr,GR64:$val),
"lock xadd $val, $ptr",
- [(set GR64:$dst, (atomic_las_64 addr:$ptr, GR64:$val))]>,
+ [(set GR64:$dst, (atomic_load_add_64 addr:$ptr, GR64:$val))]>,
TB, LOCK;
def XCHG64rm : RI<0x87, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$ptr,GR64:$val),
"xchg $val, $ptr",
diff --git a/lib/Target/X86/X86InstrInfo.td b/lib/Target/X86/X86InstrInfo.td
index 646655e..7a4e796 100644
--- a/lib/Target/X86/X86InstrInfo.td
+++ b/lib/Target/X86/X86InstrInfo.td
@@ -2614,19 +2614,19 @@
let Constraints = "$val = $dst", Defs = [EFLAGS] in {
def LXADD32 : I<0xC1, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$ptr, GR32:$val),
"lock xadd{l}\t{$val, $ptr|$ptr, $val}",
- [(set GR32:$dst, (atomic_las_32 addr:$ptr, GR32:$val))]>,
+ [(set GR32:$dst, (atomic_load_add_32 addr:$ptr, GR32:$val))]>,
TB, LOCK;
def LXADD16 : I<0xC1, MRMSrcMem, (outs GR16:$dst), (ins i16mem:$ptr, GR16:$val),
"lock xadd{w}\t{$val, $ptr|$ptr, $val}",
- [(set GR16:$dst, (atomic_las_16 addr:$ptr, GR16:$val))]>,
+ [(set GR16:$dst, (atomic_load_add_16 addr:$ptr, GR16:$val))]>,
TB, OpSize, LOCK;
def LXADD8 : I<0xC0, MRMSrcMem, (outs GR8:$dst), (ins i8mem:$ptr, GR8:$val),
"lock xadd{b}\t{$val, $ptr|$ptr, $val}",
- [(set GR8:$dst, (atomic_las_8 addr:$ptr, GR8:$val))]>,
+ [(set GR8:$dst, (atomic_load_add_8 addr:$ptr, GR8:$val))]>,
TB, LOCK;
}
-// Atomic exchange and and, or, xor
+// Atomic exchange, and, or, xor
let Constraints = "$val = $dst", Defs = [EFLAGS],
usesCustomDAGSchedInserter = 1 in {
def ATOMAND32 : I<0, Pseudo, (outs GR32:$dst),(ins i32mem:$ptr, GR32:$val),
@@ -2639,7 +2639,7 @@
"#ATOMXOR32 PSUEDO!",
[(set GR32:$dst, (atomic_load_xor addr:$ptr, GR32:$val))]>;
def ATOMNAND32 : I<0, Pseudo,(outs GR32:$dst),(ins i32mem:$ptr, GR32:$val),
- "#ATOMXOR32 PSUEDO!",
+ "#ATOMNAND32 PSUEDO!",
[(set GR32:$dst, (atomic_load_nand addr:$ptr, GR32:$val))]>;
def ATOMMIN32: I<0, Pseudo, (outs GR32:$dst), (ins i32mem:$ptr, GR32:$val),