[RISCV] Codegen for i8, i16, and i32 atomicrmw with RV32A

Introduce a new RISCVExpandPseudoInsts pass to expand atomic
pseudo-instructions after register allocation. This is necessary to ensure
that register spills aren't introduced between the LL and SC, which would
break the forward-progress guarantee for the operation. AArch64 does
something similar for CmpXchg (though only at O0), and Mips is moving
towards this approach (see D31287). See also [this mailing list
post](http://lists.llvm.org/pipermail/llvm-dev/2016-May/099490.html) from
James Knight, which summarises the issues with lowering to LL/SC in IR or
pre-RA.

See the [accompanying RFC
thread](http://lists.llvm.org/pipermail/llvm-dev/2018-June/123993.html) for an
overview of the lowering strategy.
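In short, a narrow atomicrmw is rewritten to operate on the aligned 32-bit
word that contains it, described by a shift amount and a mask (the
PartwordMaskValues computed in the diff below). The sketch that follows is
plain C++ rather than anything in this patch, and maskedXchg8 is an invented
name: it only illustrates the address/shift/mask arithmetic for an i8 xchg on
a little-endian target, with a compare-exchange loop standing in for the
LR.W/SC.W loop that the new pseudo-expansion pass emits after register
allocation.

```cpp
#include <atomic>
#include <cstdint>

// Illustrative only: the real lowering calls a target masked-atomicrmw
// intrinsic; this just shows what AlignedAddr, ShiftAmt, Mask and
// ValOperand_Shifted compute for an i8 exchange.
uint8_t maskedXchg8(uint8_t *Addr, uint8_t NewVal) {
  uintptr_t A = reinterpret_cast<uintptr_t>(Addr);
  auto *AlignedAddr =
      reinterpret_cast<std::atomic<uint32_t> *>(A & ~uintptr_t(3));
  unsigned ShiftAmt = (A & 3) * 8;                  // byte offset in the word
  uint32_t Mask = uint32_t(0xff) << ShiftAmt;       // bits owned by this i8
  uint32_t Shifted = uint32_t(NewVal) << ShiftAmt;  // ValOperand_Shifted

  // CAS loop standing in for the LR.W/SC.W loop emitted after regalloc.
  uint32_t Old = AlignedAddr->load();
  while (!AlignedAddr->compare_exchange_weak(Old, (Old & ~Mask) | Shifted))
    ;
  // Recover the old i8 from the old 32-bit word (the final LShr + Trunc).
  return uint8_t(Old >> ShiftAmt);
}
```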
Differential Revision: https://reviews.llvm.org/D47882
llvm-svn: 342534
diff --git a/llvm/lib/CodeGen/AtomicExpandPass.cpp b/llvm/lib/CodeGen/AtomicExpandPass.cpp
index b55afed..3a283ca 100644
--- a/llvm/lib/CodeGen/AtomicExpandPass.cpp
+++ b/llvm/lib/CodeGen/AtomicExpandPass.cpp
@@ -90,6 +90,7 @@
TargetLoweringBase::AtomicExpansionKind ExpansionKind);
AtomicRMWInst *widenPartwordAtomicRMW(AtomicRMWInst *AI);
void expandPartwordCmpXchg(AtomicCmpXchgInst *I);
+ void expandAtomicRMWToMaskedIntrinsic(AtomicRMWInst *AI);
AtomicCmpXchgInst *convertCmpXchgToIntegerType(AtomicCmpXchgInst *CI);
static Value *insertRMWCmpXchgLoop(
@@ -411,8 +412,9 @@
return expandAtomicLoadToLL(LI);
case TargetLoweringBase::AtomicExpansionKind::CmpXChg:
return expandAtomicLoadToCmpXchg(LI);
+ default:
+ llvm_unreachable("Unhandled case in tryExpandAtomicLoad");
}
- llvm_unreachable("Unhandled case in tryExpandAtomicLoad");
}
bool AtomicExpand::expandAtomicLoadToLL(LoadInst *LI) {
@@ -574,6 +576,10 @@
}
return true;
}
+ case TargetLoweringBase::AtomicExpansionKind::MaskedIntrinsic: {
+ expandAtomicRMWToMaskedIntrinsic(AI);
+ return true;
+ }
default:
llvm_unreachable("Unhandled case in tryExpandAtomicRMW");
}
@@ -662,6 +668,9 @@
IRBuilder<> &Builder, Value *Loaded,
Value *Shifted_Inc, Value *Inc,
const PartwordMaskValues &PMV) {
+ // TODO: update to use
+ // https://graphics.stanford.edu/~seander/bithacks.html#MaskedMerge in order
+ // to merge bits from two values without requiring PMV.Inv_Mask.
switch (Op) {
case AtomicRMWInst::Xchg: {
Value *Loaded_MaskOut = Builder.CreateAnd(Loaded, PMV.Inv_Mask);
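For reference, the masked-merge trick that the TODO above points at combines
bits from two words using only Mask, so the expansion would no longer need
PMV.Inv_Mask. A minimal sketch of the identity (plain C++, not part of the
patch):

```cpp
#include <cstdint>

// Take bits from B where Mask is set and from A elsewhere.
// Classic form:            (A & ~Mask) | (B & Mask)
// Bithacks "masked merge":  A ^ ((A ^ B) & Mask)
uint32_t maskedMerge(uint32_t A, uint32_t B, uint32_t Mask) {
  return A ^ ((A ^ B) & Mask);
}
```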
@@ -914,6 +923,33 @@
I->eraseFromParent();
}
+void AtomicExpand::expandAtomicRMWToMaskedIntrinsic(AtomicRMWInst *AI) {
+ IRBuilder<> Builder(AI);
+
+ PartwordMaskValues PMV =
+ createMaskInstrs(Builder, AI, AI->getType(), AI->getPointerOperand(),
+ TLI->getMinCmpXchgSizeInBits() / 8);
+
+ // The value operand must be sign-extended for signed min/max so that the
+ // target's signed comparison instructions can be used. Otherwise, just
+ // zero-ext.
+ Instruction::CastOps CastOp = Instruction::ZExt;
+ AtomicRMWInst::BinOp RMWOp = AI->getOperation();
+ if (RMWOp == AtomicRMWInst::Max || RMWOp == AtomicRMWInst::Min)
+ CastOp = Instruction::SExt;
+
+ Value *ValOperand_Shifted = Builder.CreateShl(
+ Builder.CreateCast(CastOp, AI->getValOperand(), PMV.WordType),
+ PMV.ShiftAmt, "ValOperand_Shifted");
+ Value *OldResult = TLI->emitMaskedAtomicRMWIntrinsic(
+ Builder, AI, PMV.AlignedAddr, ValOperand_Shifted, PMV.Mask, PMV.ShiftAmt,
+ AI->getOrdering());
+ Value *FinalOldResult = Builder.CreateTrunc(
+ Builder.CreateLShr(OldResult, PMV.ShiftAmt), PMV.ValueType);
+ AI->replaceAllUsesWith(FinalOldResult);
+ AI->eraseFromParent();
+}
+
Value *AtomicExpand::insertRMWLLSCLoop(
IRBuilder<> &Builder, Type *ResultTy, Value *Addr,
AtomicOrdering MemOpOrder,
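On the sign-extension comment in expandAtomicRMWToMaskedIntrinsic above:
widening an i8 value with zext would make negative values look like large
positive numbers to the target's 32-bit signed min/max comparison, so min/max
use sext instead. A tiny standalone check of that reasoning (not patch code):

```cpp
#include <cstdint>

// -1 as i8 must still compare as negative after widening to 32 bits.
static_assert(!(int32_t(uint32_t(uint8_t(-1))) < 0),
              "zero-extended -1 no longer looks negative to a signed compare");
static_assert(int32_t(int8_t(-1)) < 0,
              "sign-extended -1 still looks negative to a signed compare");
```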