Revert r162713: "Add ATOMIC_LDR* pseudo-instructions to model atomic_load on ARM."
This wasn't the right way to enforce ordering of atomics.
We already set the isVolatile bit on the memory operands of atomic
operations, which is sufficient to enforce the correct ordering.
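For reference, a minimal sketch of the mechanism the message relies on,
assuming the contemporaneous MachineMemOperand API; the helper name here
is hypothetical and not part of this patch:

    // Sketch only: how an atomic load's memory operand carries the
    // MOVolatile flag, so schedulers and optimizers that query
    // MachineMemOperand::isVolatile() will not reorder it. This makes
    // a dedicated ATOMIC_LDR* pseudo-instruction unnecessary.
    #include "llvm/CodeGen/MachineFunction.h"
    #include "llvm/CodeGen/MachineMemOperand.h"

    using namespace llvm;

    // Hypothetical helper illustrating the flag combination; the real
    // flag-setting happens during atomic lowering in SelectionDAG.
    static MachineMemOperand *
    makeAtomicLoadMMO(MachineFunction &MF, MachinePointerInfo PtrInfo,
                      uint64_t Size, unsigned Align) {
      unsigned Flags = MachineMemOperand::MOLoad |
                       MachineMemOperand::MOVolatile;
      return MF.getMachineMemOperand(PtrInfo, Flags, Size, Align);
    }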
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@162732 91177308-0d34-0410-b5e6-96231b3b80d8
diff --git a/lib/Target/ARM/ARMBaseInstrInfo.cpp b/lib/Target/ARM/ARMBaseInstrInfo.cpp
index 378331f..2112992 100644
--- a/lib/Target/ARM/ARMBaseInstrInfo.cpp
+++ b/lib/Target/ARM/ARMBaseInstrInfo.cpp
@@ -2778,8 +2778,8 @@
// variants are one cycle cheaper.
switch (DefMCID->getOpcode()) {
default: break;
- case ARM::LDRrs: case ARM::ATOMIC_LDRrs:
- case ARM::LDRBrs: case ARM::ATOMIC_LDRBrs: {
+ case ARM::LDRrs:
+ case ARM::LDRBrs: {
unsigned ShOpVal = DefMI->getOperand(3).getImm();
unsigned ShImm = ARM_AM::getAM2Offset(ShOpVal);
if (ShImm == 0 ||
@@ -2787,9 +2787,9 @@
--Adjust;
break;
}
- case ARM::t2LDRs: case ARM::ATOMIC_t2LDRs:
- case ARM::t2LDRBs: case ARM::ATOMIC_t2LDRBs:
- case ARM::t2LDRHs: case ARM::ATOMIC_t2LDRHs:
+ case ARM::t2LDRs:
+ case ARM::t2LDRBs:
+ case ARM::t2LDRHs:
case ARM::t2LDRSHs: {
// Thumb2 mode: lsl only.
unsigned ShAmt = DefMI->getOperand(3).getImm();
@@ -3046,8 +3046,8 @@
// variants are one cycle cheaper.
switch (DefMCID.getOpcode()) {
default: break;
- case ARM::LDRrs: case ARM::ATOMIC_LDRrs:
- case ARM::LDRBrs: case ARM::ATOMIC_LDRBrs: {
+ case ARM::LDRrs:
+ case ARM::LDRBrs: {
unsigned ShOpVal =
cast<ConstantSDNode>(DefNode->getOperand(2))->getZExtValue();
unsigned ShImm = ARM_AM::getAM2Offset(ShOpVal);
@@ -3056,9 +3056,9 @@
--Latency;
break;
}
- case ARM::t2LDRs: case ARM::ATOMIC_t2LDRs:
- case ARM::t2LDRBs: case ARM::ATOMIC_t2LDRBs:
- case ARM::t2LDRHs: case ARM::ATOMIC_t2LDRHs:
+ case ARM::t2LDRs:
+ case ARM::t2LDRBs:
+ case ARM::t2LDRHs:
case ARM::t2LDRSHs: {
// Thumb2 mode: lsl only.
unsigned ShAmt =