//===-- AtomicExpandPass.cpp - Expand atomic instructions -------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains a pass (at IR level) to replace atomic instructions with
// target specific instructions which implement the same semantics in a way
// which better fits the target backend. This can include the use of either
// (intrinsic-based) load-linked/store-conditional loops, AtomicCmpXchg, or
// type coercions.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/AtomicExpandUtils.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetSubtargetInfo.h"

using namespace llvm;

#define DEBUG_TYPE "atomic-expand"

namespace {
  class AtomicExpand: public FunctionPass {
    const TargetMachine *TM;
    const TargetLowering *TLI;
  public:
    static char ID; // Pass identification, replacement for typeid
    explicit AtomicExpand(const TargetMachine *TM = nullptr)
      : FunctionPass(ID), TM(TM), TLI(nullptr) {
      initializeAtomicExpandPass(*PassRegistry::getPassRegistry());
    }

    bool runOnFunction(Function &F) override;

  private:
    bool bracketInstWithFences(Instruction *I, AtomicOrdering Order,
                               bool IsStore, bool IsLoad);
    IntegerType *getCorrespondingIntegerType(Type *T, const DataLayout &DL);
    LoadInst *convertAtomicLoadToIntegerType(LoadInst *LI);
    bool tryExpandAtomicLoad(LoadInst *LI);
    bool expandAtomicLoadToLL(LoadInst *LI);
    bool expandAtomicLoadToCmpXchg(LoadInst *LI);
    StoreInst *convertAtomicStoreToIntegerType(StoreInst *SI);
    bool expandAtomicStore(StoreInst *SI);
    bool tryExpandAtomicRMW(AtomicRMWInst *AI);
    bool expandAtomicOpToLLSC(
        Instruction *I, Value *Addr, AtomicOrdering MemOpOrder,
        std::function<Value *(IRBuilder<> &, Value *)> PerformOp);
    AtomicCmpXchgInst *convertCmpXchgToIntegerType(AtomicCmpXchgInst *CI);
    bool expandAtomicCmpXchg(AtomicCmpXchgInst *CI);
    bool isIdempotentRMW(AtomicRMWInst *AI);
    bool simplifyIdempotentRMW(AtomicRMWInst *AI);
  };
}

char AtomicExpand::ID = 0;
char &llvm::AtomicExpandID = AtomicExpand::ID;
INITIALIZE_TM_PASS(AtomicExpand, "atomic-expand",
    "Expand Atomic calls in terms of either load-linked & store-conditional or cmpxchg",
    false, false)

FunctionPass *llvm::createAtomicExpandPass(const TargetMachine *TM) {
  return new AtomicExpand(TM);
}

bool AtomicExpand::runOnFunction(Function &F) {
  if (!TM || !TM->getSubtargetImpl(F)->enableAtomicExpand())
    return false;
  TLI = TM->getSubtargetImpl(F)->getTargetLowering();

  SmallVector<Instruction *, 1> AtomicInsts;

  // Changing control-flow while iterating through it is a bad idea, so gather a
  // list of all atomic instructions before we start.
  for (inst_iterator I = inst_begin(F), E = inst_end(F); I != E; ++I) {
    if (I->isAtomic())
      AtomicInsts.push_back(&*I);
  }

  bool MadeChange = false;
  for (auto I : AtomicInsts) {
    auto LI = dyn_cast<LoadInst>(I);
    auto SI = dyn_cast<StoreInst>(I);
    auto RMWI = dyn_cast<AtomicRMWInst>(I);
    auto CASI = dyn_cast<AtomicCmpXchgInst>(I);
    assert((LI || SI || RMWI || CASI || isa<FenceInst>(I)) &&
           "Unknown atomic instruction");

    if (TLI->shouldInsertFencesForAtomic(I)) {
      auto FenceOrdering = Monotonic;
      bool IsStore, IsLoad;
      if (LI && isAtLeastAcquire(LI->getOrdering())) {
        FenceOrdering = LI->getOrdering();
        LI->setOrdering(Monotonic);
        IsStore = false;
        IsLoad = true;
      } else if (SI && isAtLeastRelease(SI->getOrdering())) {
        FenceOrdering = SI->getOrdering();
        SI->setOrdering(Monotonic);
        IsStore = true;
        IsLoad = false;
      } else if (RMWI && (isAtLeastRelease(RMWI->getOrdering()) ||
                          isAtLeastAcquire(RMWI->getOrdering()))) {
        FenceOrdering = RMWI->getOrdering();
        RMWI->setOrdering(Monotonic);
        IsStore = IsLoad = true;
      } else if (CASI && !TLI->shouldExpandAtomicCmpXchgInIR(CASI) &&
                 (isAtLeastRelease(CASI->getSuccessOrdering()) ||
                  isAtLeastAcquire(CASI->getSuccessOrdering()))) {
        // If a compare and swap is lowered to LL/SC, we can do smarter fence
        // insertion, with a stronger one on the success path than on the
        // failure path. As a result, fence insertion is directly done by
        // expandAtomicCmpXchg in that case.
        FenceOrdering = CASI->getSuccessOrdering();
        CASI->setSuccessOrdering(Monotonic);
        CASI->setFailureOrdering(Monotonic);
        IsStore = IsLoad = true;
      }

      if (FenceOrdering != Monotonic) {
        MadeChange |= bracketInstWithFences(I, FenceOrdering, IsStore, IsLoad);
      }
    }

    if (LI) {
      if (LI->getType()->isFloatingPointTy()) {
        // TODO: add a TLI hook to control this so that each target can
        // convert to lowering the original type one at a time.
        LI = convertAtomicLoadToIntegerType(LI);
        assert(LI->getType()->isIntegerTy() && "invariant broken");
        MadeChange = true;
      }

      MadeChange |= tryExpandAtomicLoad(LI);
    } else if (SI) {
      if (SI->getValueOperand()->getType()->isFloatingPointTy()) {
        // TODO: add a TLI hook to control this so that each target can
        // convert to lowering the original type one at a time.
        SI = convertAtomicStoreToIntegerType(SI);
        assert(SI->getValueOperand()->getType()->isIntegerTy() &&
               "invariant broken");
        MadeChange = true;
      }

      if (TLI->shouldExpandAtomicStoreInIR(SI))
        MadeChange |= expandAtomicStore(SI);
    } else if (RMWI) {
      // There are two different ways of expanding RMW instructions:
      // - into a load if it is idempotent
      // - into a Cmpxchg/LL-SC loop otherwise
      // we try them in that order.

      if (isIdempotentRMW(RMWI) && simplifyIdempotentRMW(RMWI)) {
        MadeChange = true;
      } else {
        MadeChange |= tryExpandAtomicRMW(RMWI);
      }
    } else if (CASI) {
      // TODO: when we're ready to make the change at the IR level, we can
      // extend convertCmpXchgToInteger for floating point too.
      assert(!CASI->getCompareOperand()->getType()->isFloatingPointTy() &&
             "unimplemented - floating point not legal at IR level");
      if (CASI->getCompareOperand()->getType()->isPointerTy()) {
        // TODO: add a TLI hook to control this so that each target can
        // convert to lowering the original type one at a time.
        CASI = convertCmpXchgToIntegerType(CASI);
        assert(CASI->getCompareOperand()->getType()->isIntegerTy() &&
               "invariant broken");
        MadeChange = true;
      }

      if (TLI->shouldExpandAtomicCmpXchgInIR(CASI))
        MadeChange |= expandAtomicCmpXchg(CASI);
    }
  }
  return MadeChange;
}

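/// Bracket \p I with target-specific leading and trailing fences implementing
/// \p Order; the caller has already demoted the instruction itself to
/// monotonic. As an illustrative sketch (the exact fences emitted are up to
/// the target), on a fence-based target an acquire load such as:
///   %v = load atomic i32, i32* %p acquire, align 4
/// would end up as:
///   %v = load atomic i32, i32* %p monotonic, align 4
///   fence acquire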
bool AtomicExpand::bracketInstWithFences(Instruction *I, AtomicOrdering Order,
                                         bool IsStore, bool IsLoad) {
  IRBuilder<> Builder(I);

  auto LeadingFence = TLI->emitLeadingFence(Builder, Order, IsStore, IsLoad);

  auto TrailingFence = TLI->emitTrailingFence(Builder, Order, IsStore, IsLoad);
  // The trailing fence is emitted before the instruction instead of after
  // because there is no easy way of setting Builder insertion point after
  // an instruction. So we must erase it from the BB, and insert it back
  // in the right place.
  // We have a guard here because not every atomic operation generates a
  // trailing fence.
  if (TrailingFence) {
    TrailingFence->removeFromParent();
    TrailingFence->insertAfter(I);
  }

  return (LeadingFence || TrailingFence);
}

/// Get the iX type with the same bitwidth as T.
IntegerType *AtomicExpand::getCorrespondingIntegerType(Type *T,
                                                       const DataLayout &DL) {
  EVT VT = TLI->getValueType(DL, T);
  unsigned BitWidth = VT.getStoreSizeInBits();
  assert(BitWidth == VT.getSizeInBits() && "must be a power of two");
  return IntegerType::get(T->getContext(), BitWidth);
}

/// Convert an atomic load of a non-integral type to an integer load of the
/// equivalent bitwidth. See the function comment on
/// convertAtomicStoreToIntegerType for background.
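/// For example (an illustrative sketch; the value names are not what the
/// builder would actually pick):
///   %v = load atomic float, float* %p seq_cst, align 4
/// becomes:
///   %1 = bitcast float* %p to i32*
///   %2 = load atomic i32, i32* %1 seq_cst, align 4
///   %v = bitcast i32 %2 to float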
LoadInst *AtomicExpand::convertAtomicLoadToIntegerType(LoadInst *LI) {
  auto *M = LI->getModule();
  Type *NewTy = getCorrespondingIntegerType(LI->getType(),
                                            M->getDataLayout());

  IRBuilder<> Builder(LI);

  Value *Addr = LI->getPointerOperand();
  Type *PT = PointerType::get(NewTy,
                              Addr->getType()->getPointerAddressSpace());
  Value *NewAddr = Builder.CreateBitCast(Addr, PT);

  auto *NewLI = Builder.CreateLoad(NewAddr);
  NewLI->setAlignment(LI->getAlignment());
  NewLI->setVolatile(LI->isVolatile());
  NewLI->setAtomic(LI->getOrdering(), LI->getSynchScope());
  DEBUG(dbgs() << "Replaced " << *LI << " with " << *NewLI << "\n");

  Value *NewVal = Builder.CreateBitCast(NewLI, LI->getType());
  LI->replaceAllUsesWith(NewVal);
  LI->eraseFromParent();
  return NewLI;
}

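/// Expand an atomic load according to the strategy the target requests: leave
/// it alone, rewrite it as an LL/SC loop with an identity operation, emit a
/// bare load-linked, or rewrite it as a cmpxchg of a dummy value.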
bool AtomicExpand::tryExpandAtomicLoad(LoadInst *LI) {
  switch (TLI->shouldExpandAtomicLoadInIR(LI)) {
  case TargetLoweringBase::AtomicExpansionKind::None:
    return false;
  case TargetLoweringBase::AtomicExpansionKind::LLSC:
    return expandAtomicOpToLLSC(
        LI, LI->getPointerOperand(), LI->getOrdering(),
        [](IRBuilder<> &Builder, Value *Loaded) { return Loaded; });
  case TargetLoweringBase::AtomicExpansionKind::LLOnly:
    return expandAtomicLoadToLL(LI);
  case TargetLoweringBase::AtomicExpansionKind::CmpXChg:
    return expandAtomicLoadToCmpXchg(LI);
  }
  llvm_unreachable("Unhandled case in tryExpandAtomicLoad");
}

bool AtomicExpand::expandAtomicLoadToLL(LoadInst *LI) {
  IRBuilder<> Builder(LI);

  // On some architectures, load-linked instructions are atomic for larger
  // sizes than normal loads. For example, the only 64-bit load guaranteed
  // to be single-copy atomic by ARM is an ldrexd (A3.5.3).
  Value *Val =
      TLI->emitLoadLinked(Builder, LI->getPointerOperand(), LI->getOrdering());
  TLI->emitAtomicCmpXchgNoStoreLLBalance(Builder);

  LI->replaceAllUsesWith(Val);
  LI->eraseFromParent();

  return true;
}

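/// A strong cmpxchg of a dummy value never changes memory and still returns
/// the value that was there, so an atomic load can be lowered this way. For
/// example (an illustrative sketch):
///   %v = load atomic i64, i64* %p seq_cst, align 8
/// becomes roughly:
///   %pair = cmpxchg i64* %p, i64 0, i64 0 seq_cst seq_cst
///   %v = extractvalue { i64, i1 } %pair, 0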
bool AtomicExpand::expandAtomicLoadToCmpXchg(LoadInst *LI) {
  IRBuilder<> Builder(LI);
  AtomicOrdering Order = LI->getOrdering();
  Value *Addr = LI->getPointerOperand();
  Type *Ty = cast<PointerType>(Addr->getType())->getElementType();
  Constant *DummyVal = Constant::getNullValue(Ty);

  Value *Pair = Builder.CreateAtomicCmpXchg(
      Addr, DummyVal, DummyVal, Order,
      AtomicCmpXchgInst::getStrongestFailureOrdering(Order));
  Value *Loaded = Builder.CreateExtractValue(Pair, 0, "loaded");

  LI->replaceAllUsesWith(Loaded);
  LI->eraseFromParent();

  return true;
}

/// Convert an atomic store of a non-integral type to an integer store of the
/// equivalent bitwidth. We used to not support floating point or vector
/// atomics in the IR at all. The backends learned to deal with the bitcast
/// idiom because that was the only way of expressing the notion of an atomic
/// float or vector store. The long term plan is to teach each backend to
/// instruction select from the original atomic store, but as a migration
/// mechanism, we convert back to the old format which the backends understand.
/// Each backend will need individual work to recognize the new format.
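/// For example (an illustrative sketch; the value names are not what the
/// builder would actually pick):
///   store atomic float %f, float* %p seq_cst, align 4
/// becomes:
///   %1 = bitcast float %f to i32
///   %2 = bitcast float* %p to i32*
///   store atomic i32 %1, i32* %2 seq_cst, align 4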
StoreInst *AtomicExpand::convertAtomicStoreToIntegerType(StoreInst *SI) {
  IRBuilder<> Builder(SI);
  auto *M = SI->getModule();
  Type *NewTy = getCorrespondingIntegerType(SI->getValueOperand()->getType(),
                                            M->getDataLayout());
  Value *NewVal = Builder.CreateBitCast(SI->getValueOperand(), NewTy);

  Value *Addr = SI->getPointerOperand();
  Type *PT = PointerType::get(NewTy,
                              Addr->getType()->getPointerAddressSpace());
  Value *NewAddr = Builder.CreateBitCast(Addr, PT);

  StoreInst *NewSI = Builder.CreateStore(NewVal, NewAddr);
  NewSI->setAlignment(SI->getAlignment());
  NewSI->setVolatile(SI->isVolatile());
  NewSI->setAtomic(SI->getOrdering(), SI->getSynchScope());
  DEBUG(dbgs() << "Replaced " << *SI << " with " << *NewSI << "\n");
  SI->eraseFromParent();
  return NewSI;
}

bool AtomicExpand::expandAtomicStore(StoreInst *SI) {
  // This function is only called on atomic stores that are too large to be
  // atomic if implemented as a native store. So we replace them by an
  // atomic swap, that can be implemented for example as a ldrex/strex on ARM
  // or lock cmpxchg8/16b on X86, as these are atomic for larger sizes.
  // It is the responsibility of the target to only signal expansion via
  // shouldExpandAtomicRMW in cases where this is required and possible.
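  // For example (an illustrative sketch), on a 32-bit target without 64-bit
  // native stores:
  //   store atomic i64 %v, i64* %p seq_cst, align 8
  // becomes:
  //   atomicrmw xchg i64* %p, i64 %v seq_cst
  // which tryExpandAtomicRMW below then lowers further.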
  IRBuilder<> Builder(SI);
  AtomicRMWInst *AI =
      Builder.CreateAtomicRMW(AtomicRMWInst::Xchg, SI->getPointerOperand(),
                              SI->getValueOperand(), SI->getOrdering());
  SI->eraseFromParent();

  // Now we have an appropriate swap instruction, lower it as usual.
  return tryExpandAtomicRMW(AI);
}

static void createCmpXchgInstFun(IRBuilder<> &Builder, Value *Addr,
                                 Value *Loaded, Value *NewVal,
                                 AtomicOrdering MemOpOrder,
                                 Value *&Success, Value *&NewLoaded) {
  Value *Pair = Builder.CreateAtomicCmpXchg(
      Addr, Loaded, NewVal, MemOpOrder,
      AtomicCmpXchgInst::getStrongestFailureOrdering(MemOpOrder));
  Success = Builder.CreateExtractValue(Pair, 1, "success");
  NewLoaded = Builder.CreateExtractValue(Pair, 0, "newloaded");
}

/// Emit IR to implement the given atomicrmw operation on values in registers,
/// returning the new value.
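/// For example (an illustrative sketch), Op == AtomicRMWInst::UMax emits:
///   %0 = icmp ugt iN %Loaded, %Inc
///   %new = select i1 %0, iN %Loaded, iN %Inc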
static Value *performAtomicOp(AtomicRMWInst::BinOp Op, IRBuilder<> &Builder,
                              Value *Loaded, Value *Inc) {
  Value *NewVal;
  switch (Op) {
  case AtomicRMWInst::Xchg:
    return Inc;
  case AtomicRMWInst::Add:
    return Builder.CreateAdd(Loaded, Inc, "new");
  case AtomicRMWInst::Sub:
    return Builder.CreateSub(Loaded, Inc, "new");
  case AtomicRMWInst::And:
    return Builder.CreateAnd(Loaded, Inc, "new");
  case AtomicRMWInst::Nand:
    return Builder.CreateNot(Builder.CreateAnd(Loaded, Inc), "new");
  case AtomicRMWInst::Or:
    return Builder.CreateOr(Loaded, Inc, "new");
  case AtomicRMWInst::Xor:
    return Builder.CreateXor(Loaded, Inc, "new");
  case AtomicRMWInst::Max:
    NewVal = Builder.CreateICmpSGT(Loaded, Inc);
    return Builder.CreateSelect(NewVal, Loaded, Inc, "new");
  case AtomicRMWInst::Min:
    NewVal = Builder.CreateICmpSLE(Loaded, Inc);
    return Builder.CreateSelect(NewVal, Loaded, Inc, "new");
  case AtomicRMWInst::UMax:
    NewVal = Builder.CreateICmpUGT(Loaded, Inc);
    return Builder.CreateSelect(NewVal, Loaded, Inc, "new");
  case AtomicRMWInst::UMin:
    NewVal = Builder.CreateICmpULE(Loaded, Inc);
    return Builder.CreateSelect(NewVal, Loaded, Inc, "new");
  default:
    llvm_unreachable("Unknown atomic op");
  }
}

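/// Expand an atomicrmw according to the strategy the target requests: leave
/// it alone, lower it to a load-linked/store-conditional loop, or lower it to
/// a cmpxchg loop.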
bool AtomicExpand::tryExpandAtomicRMW(AtomicRMWInst *AI) {
  switch (TLI->shouldExpandAtomicRMWInIR(AI)) {
  case TargetLoweringBase::AtomicExpansionKind::None:
    return false;
  case TargetLoweringBase::AtomicExpansionKind::LLSC:
    return expandAtomicOpToLLSC(AI, AI->getPointerOperand(), AI->getOrdering(),
                                [&](IRBuilder<> &Builder, Value *Loaded) {
                                  return performAtomicOp(AI->getOperation(),
                                                         Builder, Loaded,
                                                         AI->getValOperand());
                                });
  case TargetLoweringBase::AtomicExpansionKind::CmpXChg:
    return expandAtomicRMWToCmpXchg(AI, createCmpXchgInstFun);
  default:
    llvm_unreachable("Unhandled case in tryExpandAtomicRMW");
  }
}

bool AtomicExpand::expandAtomicOpToLLSC(
    Instruction *I, Value *Addr, AtomicOrdering MemOpOrder,
    std::function<Value *(IRBuilder<> &, Value *)> PerformOp) {
  BasicBlock *BB = I->getParent();
  Function *F = BB->getParent();
  LLVMContext &Ctx = F->getContext();

  // Given: atomicrmw some_op iN* %addr, iN %incr ordering
  //
  // The standard expansion we produce is:
  //     [...]
  //     fence?
  // atomicrmw.start:
  //     %loaded = @load.linked(%addr)
  //     %new = some_op iN %loaded, %incr
  //     %stored = @store_conditional(%new, %addr)
  //     %try_again = icmp ne i32 %stored, 0
  //     br i1 %try_again, label %atomicrmw.start, label %atomicrmw.end
  // atomicrmw.end:
  //     fence?
  //     [...]
  BasicBlock *ExitBB = BB->splitBasicBlock(I->getIterator(), "atomicrmw.end");
  BasicBlock *LoopBB = BasicBlock::Create(Ctx, "atomicrmw.start", F, ExitBB);

  // This grabs the DebugLoc from I.
  IRBuilder<> Builder(I);

  // The split call above "helpfully" added a branch at the end of BB (to the
  // wrong place), but we might want a fence too. It's easiest to just remove
  // the branch entirely.
  std::prev(BB->end())->eraseFromParent();
  Builder.SetInsertPoint(BB);
  Builder.CreateBr(LoopBB);

  // Start the main loop block now that we've taken care of the preliminaries.
  Builder.SetInsertPoint(LoopBB);
  Value *Loaded = TLI->emitLoadLinked(Builder, Addr, MemOpOrder);

  Value *NewVal = PerformOp(Builder, Loaded);

  Value *StoreSuccess =
      TLI->emitStoreConditional(Builder, NewVal, Addr, MemOpOrder);
  Value *TryAgain = Builder.CreateICmpNE(
      StoreSuccess, ConstantInt::get(IntegerType::get(Ctx, 32), 0), "tryagain");
  Builder.CreateCondBr(TryAgain, LoopBB, ExitBB);

  Builder.SetInsertPoint(ExitBB, ExitBB->begin());

  I->replaceAllUsesWith(Loaded);
  I->eraseFromParent();

  return true;
}

/// Convert an atomic cmpxchg of a non-integral type to an integer cmpxchg of
/// the equivalent bitwidth. We used to not support pointer cmpxchg in the
/// IR. As a migration step, we convert back to what used to be the standard
/// way to represent a pointer cmpxchg so that we can update backends one by
/// one.
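/// For example (an illustrative sketch on a target with 64-bit pointers):
///   %r = cmpxchg i8** %p, i8* %cmp, i8* %new seq_cst seq_cst
/// becomes:
///   %1 = bitcast i8** %p to i64*
///   %2 = ptrtoint i8* %cmp to i64
///   %3 = ptrtoint i8* %new to i64
///   %4 = cmpxchg i64* %1, i64 %2, i64 %3 seq_cst seq_cst
/// followed by extractvalue/inttoptr/insertvalue to rebuild the original
/// { i8*, i1 } result.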
AtomicCmpXchgInst *AtomicExpand::convertCmpXchgToIntegerType(AtomicCmpXchgInst *CI) {
  auto *M = CI->getModule();
  Type *NewTy = getCorrespondingIntegerType(CI->getCompareOperand()->getType(),
                                            M->getDataLayout());

  IRBuilder<> Builder(CI);

  Value *Addr = CI->getPointerOperand();
  Type *PT = PointerType::get(NewTy,
                              Addr->getType()->getPointerAddressSpace());
  Value *NewAddr = Builder.CreateBitCast(Addr, PT);

  Value *NewCmp = Builder.CreatePtrToInt(CI->getCompareOperand(), NewTy);
  Value *NewNewVal = Builder.CreatePtrToInt(CI->getNewValOperand(), NewTy);

  auto *NewCI = Builder.CreateAtomicCmpXchg(NewAddr, NewCmp, NewNewVal,
                                            CI->getSuccessOrdering(),
                                            CI->getFailureOrdering(),
                                            CI->getSynchScope());
  NewCI->setVolatile(CI->isVolatile());
  NewCI->setWeak(CI->isWeak());
  DEBUG(dbgs() << "Replaced " << *CI << " with " << *NewCI << "\n");

  Value *OldVal = Builder.CreateExtractValue(NewCI, 0);
  Value *Succ = Builder.CreateExtractValue(NewCI, 1);

  OldVal = Builder.CreateIntToPtr(OldVal, CI->getCompareOperand()->getType());

  Value *Res = UndefValue::get(CI->getType());
  Res = Builder.CreateInsertValue(Res, OldVal, 0);
  Res = Builder.CreateInsertValue(Res, Succ, 1);

  CI->replaceAllUsesWith(Res);
  CI->eraseFromParent();
  return NewCI;
}

bool AtomicExpand::expandAtomicCmpXchg(AtomicCmpXchgInst *CI) {
  AtomicOrdering SuccessOrder = CI->getSuccessOrdering();
  AtomicOrdering FailureOrder = CI->getFailureOrdering();
  Value *Addr = CI->getPointerOperand();
  BasicBlock *BB = CI->getParent();
  Function *F = BB->getParent();
  LLVMContext &Ctx = F->getContext();
  // If shouldInsertFencesForAtomic() returns true, then the target does not
  // want to deal with memory orders, and emitLeading/TrailingFence should take
  // care of everything. Otherwise, emitLeading/TrailingFence are no-op and we
  // should preserve the ordering.
  bool ShouldInsertFencesForAtomic = TLI->shouldInsertFencesForAtomic(CI);
  AtomicOrdering MemOpOrder =
      ShouldInsertFencesForAtomic ? Monotonic : SuccessOrder;

  // In implementations which use a barrier to achieve release semantics, we
  // can delay emitting this barrier until we know a store is actually going
  // to be attempted. The cost of this delay is that we need 2 copies of the
  // block emitting the load-linked, affecting code size.
  //
  // Ideally, this logic would be unconditional except for the minsize check
  // since in other cases the extra blocks naturally collapse down to the
  // minimal loop. Unfortunately, this puts too much stress on later
  // optimisations so we avoid emitting the extra logic in those cases too.
  bool HasReleasedLoadBB = !CI->isWeak() && ShouldInsertFencesForAtomic &&
                           SuccessOrder != Monotonic &&
                           SuccessOrder != Acquire && !F->optForMinSize();

  // There's no overhead for sinking the release barrier in a weak cmpxchg, so
  // do it even on minsize.
  bool UseUnconditionalReleaseBarrier = F->optForMinSize() && !CI->isWeak();

  // Given: cmpxchg iN* %addr, iN %desired, iN %new success_ord fail_ord
  //
  // The full expansion we produce is:
  //     [...]
  // cmpxchg.start:
  //     %unreleasedload = @load.linked(%addr)
  //     %should_store = icmp eq %unreleasedload, %desired
  //     br i1 %should_store, label %cmpxchg.fencedstore,
  //                          label %cmpxchg.nostore
  // cmpxchg.fencedstore:
  //     fence?
  //     br label %cmpxchg.trystore
  // cmpxchg.trystore:
  //     %loaded.trystore = phi [%unreleasedload, %cmpxchg.fencedstore],
  //                            [%releasedload, %cmpxchg.releasedload]
  //     %stored = @store_conditional(%new, %addr)
  //     %success = icmp eq i32 %stored, 0
  //     br i1 %success, label %cmpxchg.success,
  //                     label %cmpxchg.releasedload/%cmpxchg.failure
  // cmpxchg.releasedload:
  //     %releasedload = @load.linked(%addr)
  //     %should_store = icmp eq %releasedload, %desired
  //     br i1 %should_store, label %cmpxchg.trystore,
  //                          label %cmpxchg.failure
  // cmpxchg.success:
  //     fence?
  //     br label %cmpxchg.end
  // cmpxchg.nostore:
  //     %loaded.nostore = phi [%unreleasedload, %cmpxchg.start],
  //                           [%releasedload,
  //                               %cmpxchg.releasedload/%cmpxchg.trystore]
  //     @load_linked_fail_balance()?
  //     br label %cmpxchg.failure
  // cmpxchg.failure:
  //     fence?
  //     br label %cmpxchg.end
  // cmpxchg.end:
  //     %loaded = phi [%loaded.nostore, %cmpxchg.failure],
  //                   [%loaded.trystore, %cmpxchg.trystore]
  //     %success = phi i1 [true, %cmpxchg.success], [false, %cmpxchg.failure]
  //     %restmp = insertvalue { iN, i1 } undef, iN %loaded, 0
  //     %res = insertvalue { iN, i1 } %restmp, i1 %success, 1
  //     [...]
  BasicBlock *ExitBB = BB->splitBasicBlock(CI->getIterator(), "cmpxchg.end");
  auto FailureBB = BasicBlock::Create(Ctx, "cmpxchg.failure", F, ExitBB);
  auto NoStoreBB = BasicBlock::Create(Ctx, "cmpxchg.nostore", F, FailureBB);
  auto SuccessBB = BasicBlock::Create(Ctx, "cmpxchg.success", F, NoStoreBB);
  auto ReleasedLoadBB =
      BasicBlock::Create(Ctx, "cmpxchg.releasedload", F, SuccessBB);
  auto TryStoreBB =
      BasicBlock::Create(Ctx, "cmpxchg.trystore", F, ReleasedLoadBB);
  auto ReleasingStoreBB =
      BasicBlock::Create(Ctx, "cmpxchg.fencedstore", F, TryStoreBB);
  auto StartBB = BasicBlock::Create(Ctx, "cmpxchg.start", F, ReleasingStoreBB);

  // This grabs the DebugLoc from CI.
  IRBuilder<> Builder(CI);

  // The split call above "helpfully" added a branch at the end of BB (to the
  // wrong place), but we might want a fence too. It's easiest to just remove
  // the branch entirely.
  std::prev(BB->end())->eraseFromParent();
  Builder.SetInsertPoint(BB);
  if (ShouldInsertFencesForAtomic && UseUnconditionalReleaseBarrier)
    TLI->emitLeadingFence(Builder, SuccessOrder, /*IsStore=*/true,
                          /*IsLoad=*/true);
  Builder.CreateBr(StartBB);

  // Start the main loop block now that we've taken care of the preliminaries.
  Builder.SetInsertPoint(StartBB);
  Value *UnreleasedLoad = TLI->emitLoadLinked(Builder, Addr, MemOpOrder);
  Value *ShouldStore = Builder.CreateICmpEQ(
      UnreleasedLoad, CI->getCompareOperand(), "should_store");

  // If the cmpxchg doesn't actually need any ordering when it fails, we can
  // jump straight past that fence instruction (if it exists).
  Builder.CreateCondBr(ShouldStore, ReleasingStoreBB, NoStoreBB);

  Builder.SetInsertPoint(ReleasingStoreBB);
  if (ShouldInsertFencesForAtomic && !UseUnconditionalReleaseBarrier)
    TLI->emitLeadingFence(Builder, SuccessOrder, /*IsStore=*/true,
                          /*IsLoad=*/true);
  Builder.CreateBr(TryStoreBB);

  Builder.SetInsertPoint(TryStoreBB);
  Value *StoreSuccess = TLI->emitStoreConditional(
      Builder, CI->getNewValOperand(), Addr, MemOpOrder);
  StoreSuccess = Builder.CreateICmpEQ(
      StoreSuccess, ConstantInt::get(Type::getInt32Ty(Ctx), 0), "success");
  BasicBlock *RetryBB = HasReleasedLoadBB ? ReleasedLoadBB : StartBB;
  Builder.CreateCondBr(StoreSuccess, SuccessBB,
                       CI->isWeak() ? FailureBB : RetryBB);

  Builder.SetInsertPoint(ReleasedLoadBB);
  Value *SecondLoad;
  if (HasReleasedLoadBB) {
    SecondLoad = TLI->emitLoadLinked(Builder, Addr, MemOpOrder);
    ShouldStore = Builder.CreateICmpEQ(SecondLoad, CI->getCompareOperand(),
                                       "should_store");

    // If the cmpxchg doesn't actually need any ordering when it fails, we can
    // jump straight past that fence instruction (if it exists).
    Builder.CreateCondBr(ShouldStore, TryStoreBB, NoStoreBB);
  } else
    Builder.CreateUnreachable();

  // Make sure later instructions don't get reordered with a fence if
  // necessary.
  Builder.SetInsertPoint(SuccessBB);
  if (ShouldInsertFencesForAtomic)
    TLI->emitTrailingFence(Builder, SuccessOrder, /*IsStore=*/true,
                           /*IsLoad=*/true);
  Builder.CreateBr(ExitBB);

  Builder.SetInsertPoint(NoStoreBB);
  // In the failing case, where we don't execute the store-conditional, the
  // target might want to balance out the load-linked with a dedicated
  // instruction (e.g., on ARM, clearing the exclusive monitor).
  TLI->emitAtomicCmpXchgNoStoreLLBalance(Builder);
  Builder.CreateBr(FailureBB);

  Builder.SetInsertPoint(FailureBB);
  if (ShouldInsertFencesForAtomic)
    TLI->emitTrailingFence(Builder, FailureOrder, /*IsStore=*/true,
                           /*IsLoad=*/true);
  Builder.CreateBr(ExitBB);

  // Finally, we have control-flow based knowledge of whether the cmpxchg
  // succeeded or not. We expose this to later passes by converting any
  // subsequent "icmp eq/ne %loaded, %oldval" into a use of an appropriate
  // PHI.
  Builder.SetInsertPoint(ExitBB, ExitBB->begin());
  PHINode *Success = Builder.CreatePHI(Type::getInt1Ty(Ctx), 2);
  Success->addIncoming(ConstantInt::getTrue(Ctx), SuccessBB);
  Success->addIncoming(ConstantInt::getFalse(Ctx), FailureBB);

  // Setup the builder so we can create any PHIs we need.
  Value *Loaded;
  if (!HasReleasedLoadBB)
    Loaded = UnreleasedLoad;
  else {
    Builder.SetInsertPoint(TryStoreBB, TryStoreBB->begin());
    PHINode *TryStoreLoaded = Builder.CreatePHI(UnreleasedLoad->getType(), 2);
    TryStoreLoaded->addIncoming(UnreleasedLoad, ReleasingStoreBB);
    TryStoreLoaded->addIncoming(SecondLoad, ReleasedLoadBB);

    Builder.SetInsertPoint(NoStoreBB, NoStoreBB->begin());
    PHINode *NoStoreLoaded = Builder.CreatePHI(UnreleasedLoad->getType(), 2);
    NoStoreLoaded->addIncoming(UnreleasedLoad, StartBB);
    NoStoreLoaded->addIncoming(SecondLoad, ReleasedLoadBB);

    Builder.SetInsertPoint(ExitBB, ++ExitBB->begin());
    PHINode *ExitLoaded = Builder.CreatePHI(UnreleasedLoad->getType(), 2);
    ExitLoaded->addIncoming(TryStoreLoaded, SuccessBB);
    ExitLoaded->addIncoming(NoStoreLoaded, FailureBB);

    Loaded = ExitLoaded;
  }

  // Look for any users of the cmpxchg that are just comparing the loaded value
  // against the desired one, and replace them with the CFG-derived version.
  SmallVector<ExtractValueInst *, 2> PrunedInsts;
  for (auto User : CI->users()) {
    ExtractValueInst *EV = dyn_cast<ExtractValueInst>(User);
    if (!EV)
      continue;

    assert(EV->getNumIndices() == 1 && EV->getIndices()[0] <= 1 &&
           "weird extraction from { iN, i1 }");

    if (EV->getIndices()[0] == 0)
      EV->replaceAllUsesWith(Loaded);
    else
      EV->replaceAllUsesWith(Success);

    PrunedInsts.push_back(EV);
  }

  // We can remove the instructions now we're no longer iterating through them.
  for (auto EV : PrunedInsts)
    EV->eraseFromParent();

  if (!CI->use_empty()) {
    // Some use of the full struct return that we don't understand has
    // happened, so we've got to reconstruct it properly.
    Value *Res;
    Res = Builder.CreateInsertValue(UndefValue::get(CI->getType()), Loaded, 0);
    Res = Builder.CreateInsertValue(Res, Success, 1);

    CI->replaceAllUsesWith(Res);
  }

  CI->eraseFromParent();
  return true;
}

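/// Return true if the given RMW operation can never change the value in
/// memory, e.g. "atomicrmw add i32* %p, i32 0 seq_cst" or
/// "atomicrmw and i32* %p, i32 -1 seq_cst".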
bool AtomicExpand::isIdempotentRMW(AtomicRMWInst* RMWI) {
  auto C = dyn_cast<ConstantInt>(RMWI->getValOperand());
  if (!C)
    return false;

  AtomicRMWInst::BinOp Op = RMWI->getOperation();
  switch (Op) {
  case AtomicRMWInst::Add:
  case AtomicRMWInst::Sub:
  case AtomicRMWInst::Or:
  case AtomicRMWInst::Xor:
    return C->isZero();
  case AtomicRMWInst::And:
    return C->isMinusOne();
  // FIXME: we could also treat Min/Max/UMin/UMax by the INT_MIN/INT_MAX/...
  default:
    return false;
  }
}

bool AtomicExpand::simplifyIdempotentRMW(AtomicRMWInst* RMWI) {
  if (auto ResultingLoad = TLI->lowerIdempotentRMWIntoFencedLoad(RMWI)) {
    tryExpandAtomicLoad(ResultingLoad);
    return true;
  }
  return false;
}

bool llvm::expandAtomicRMWToCmpXchg(AtomicRMWInst *AI,
                                    CreateCmpXchgInstFun CreateCmpXchg) {
  assert(AI);

  AtomicOrdering MemOpOrder =
      AI->getOrdering() == Unordered ? Monotonic : AI->getOrdering();
  Value *Addr = AI->getPointerOperand();
  BasicBlock *BB = AI->getParent();
  Function *F = BB->getParent();
  LLVMContext &Ctx = F->getContext();

  // Given: atomicrmw some_op iN* %addr, iN %incr ordering
  //
  // The standard expansion we produce is:
  //     [...]
  //     %init_loaded = load atomic iN* %addr
  //     br label %loop
  // loop:
  //     %loaded = phi iN [ %init_loaded, %entry ], [ %new_loaded, %loop ]
  //     %new = some_op iN %loaded, %incr
  //     %pair = cmpxchg iN* %addr, iN %loaded, iN %new
  //     %new_loaded = extractvalue { iN, i1 } %pair, 0
  //     %success = extractvalue { iN, i1 } %pair, 1
  //     br i1 %success, label %atomicrmw.end, label %loop
  // atomicrmw.end:
  //     [...]
  BasicBlock *ExitBB = BB->splitBasicBlock(AI->getIterator(), "atomicrmw.end");
  BasicBlock *LoopBB = BasicBlock::Create(Ctx, "atomicrmw.start", F, ExitBB);

  // This grabs the DebugLoc from AI.
  IRBuilder<> Builder(AI);

  // The split call above "helpfully" added a branch at the end of BB (to the
  // wrong place), but we want a load. It's easiest to just remove
  // the branch entirely.
  std::prev(BB->end())->eraseFromParent();
  Builder.SetInsertPoint(BB);
  LoadInst *InitLoaded = Builder.CreateLoad(Addr);
  // Atomics require at least natural alignment.
  InitLoaded->setAlignment(AI->getType()->getPrimitiveSizeInBits() / 8);
  Builder.CreateBr(LoopBB);

  // Start the main loop block now that we've taken care of the preliminaries.
  Builder.SetInsertPoint(LoopBB);
  PHINode *Loaded = Builder.CreatePHI(AI->getType(), 2, "loaded");
  Loaded->addIncoming(InitLoaded, BB);

  Value *NewVal =
      performAtomicOp(AI->getOperation(), Builder, Loaded, AI->getValOperand());

  Value *NewLoaded = nullptr;
  Value *Success = nullptr;

  CreateCmpXchg(Builder, Addr, Loaded, NewVal, MemOpOrder,
                Success, NewLoaded);
  assert(Success && NewLoaded);

  Loaded->addIncoming(NewLoaded, LoopBB);

  Builder.CreateCondBr(Success, ExitBB, LoopBB);

  Builder.SetInsertPoint(ExitBB, ExitBB->begin());

  AI->replaceAllUsesWith(NewLoaded);
  AI->eraseFromParent();

  return true;
}