//===- AtomicExpandPass.cpp - Expand atomic instructions ------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains a pass (at IR level) to replace atomic instructions with
// __atomic_* library calls, or target-specific instructions that implement the
// same semantics in a way that better fits the target backend. This can
// include the use of (intrinsic-based) load-linked/store-conditional loops,
// AtomicCmpXchg, or type coercions.
//
//===----------------------------------------------------------------------===//
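
// As an illustrative sketch (target-dependent; not literal pass output): on
// a target that requests cmpxchg-based expansion,
//
//   %old = atomicrmw add i32* %p, i32 %v seq_cst
//
// becomes a loop of roughly this shape:
//
//   %init = load i32, i32* %p
//   br label %atomicrmw.start
// atomicrmw.start:
//   %loaded = phi i32 [ %init, %entry ], [ %newloaded, %atomicrmw.start ]
//   %new = add i32 %loaded, %v
//   %pair = cmpxchg i32* %p, i32 %loaded, i32 %new seq_cst seq_cst
//   %success = extractvalue { i32, i1 } %pair, 1
//   %newloaded = extractvalue { i32, i1 } %pair, 0
//   br i1 %success, label %atomicrmw.end, label %atomicrmw.start
// atomicrmw.end: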
16
Eugene Zelenkof1933322017-09-22 23:46:57 +000017#include "llvm/ADT/ArrayRef.h"
18#include "llvm/ADT/STLExtras.h"
19#include "llvm/ADT/SmallVector.h"
JF Bastiene8aad292015-08-03 15:29:47 +000020#include "llvm/CodeGen/AtomicExpandUtils.h"
Eugene Zelenkof1933322017-09-22 23:46:57 +000021#include "llvm/CodeGen/RuntimeLibcalls.h"
David Blaikieb3bde2e2017-11-17 01:07:10 +000022#include "llvm/CodeGen/TargetLowering.h"
Francis Visoiu Mistrih8b617642017-05-18 17:21:13 +000023#include "llvm/CodeGen/TargetPassConfig.h"
David Blaikieb3bde2e2017-11-17 01:07:10 +000024#include "llvm/CodeGen/TargetSubtargetInfo.h"
Craig Topper2fa14362018-03-29 17:21:10 +000025#include "llvm/CodeGen/ValueTypes.h"
Eugene Zelenkof1933322017-09-22 23:46:57 +000026#include "llvm/IR/Attributes.h"
27#include "llvm/IR/BasicBlock.h"
28#include "llvm/IR/Constant.h"
29#include "llvm/IR/Constants.h"
30#include "llvm/IR/DataLayout.h"
31#include "llvm/IR/DerivedTypes.h"
Tim Northoverc882eb02014-04-03 11:44:58 +000032#include "llvm/IR/Function.h"
33#include "llvm/IR/IRBuilder.h"
Robin Morisseted3d48f2014-09-03 21:29:59 +000034#include "llvm/IR/InstIterator.h"
Eugene Zelenkof1933322017-09-22 23:46:57 +000035#include "llvm/IR/Instruction.h"
Tim Northoverc882eb02014-04-03 11:44:58 +000036#include "llvm/IR/Instructions.h"
Tim Northoverc882eb02014-04-03 11:44:58 +000037#include "llvm/IR/Module.h"
Eugene Zelenkof1933322017-09-22 23:46:57 +000038#include "llvm/IR/Type.h"
39#include "llvm/IR/User.h"
40#include "llvm/IR/Value.h"
41#include "llvm/Pass.h"
42#include "llvm/Support/AtomicOrdering.h"
43#include "llvm/Support/Casting.h"
Tim Northoverc882eb02014-04-03 11:44:58 +000044#include "llvm/Support/Debug.h"
Eugene Zelenkof1933322017-09-22 23:46:57 +000045#include "llvm/Support/ErrorHandling.h"
Philip Reames23319012015-12-16 01:24:05 +000046#include "llvm/Support/raw_ostream.h"
Tim Northoverc882eb02014-04-03 11:44:58 +000047#include "llvm/Target/TargetMachine.h"
Eugene Zelenkof1933322017-09-22 23:46:57 +000048#include <cassert>
49#include <cstdint>
50#include <iterator>
Eric Christopherc40e5ed2014-06-19 21:03:04 +000051
Tim Northoverc882eb02014-04-03 11:44:58 +000052using namespace llvm;
53
Robin Morisset59c23cd2014-08-21 21:50:01 +000054#define DEBUG_TYPE "atomic-expand"
Chandler Carruth1b9dde02014-04-22 02:02:50 +000055
Tim Northoverc882eb02014-04-03 11:44:58 +000056namespace {
Eugene Zelenkof1933322017-09-22 23:46:57 +000057
Robin Morisset59c23cd2014-08-21 21:50:01 +000058 class AtomicExpand: public FunctionPass {
Eugene Zelenkof1933322017-09-22 23:46:57 +000059 const TargetLowering *TLI = nullptr;
60
Tim Northoverc882eb02014-04-03 11:44:58 +000061 public:
62 static char ID; // Pass identification, replacement for typeid
Eugene Zelenkof1933322017-09-22 23:46:57 +000063
64 AtomicExpand() : FunctionPass(ID) {
Robin Morisset59c23cd2014-08-21 21:50:01 +000065 initializeAtomicExpandPass(*PassRegistry::getPassRegistry());
Tim Northover037f26f22014-04-17 18:22:47 +000066 }
Tim Northoverc882eb02014-04-03 11:44:58 +000067
68 bool runOnFunction(Function &F) override;
Tim Northoverc882eb02014-04-03 11:44:58 +000069
Robin Morisseted3d48f2014-09-03 21:29:59 +000070 private:
Tim Shen04de70d2017-05-09 15:27:17 +000071 bool bracketInstWithFences(Instruction *I, AtomicOrdering Order);
Philip Reames61a24ab2015-12-16 00:49:36 +000072 IntegerType *getCorrespondingIntegerType(Type *T, const DataLayout &DL);
73 LoadInst *convertAtomicLoadToIntegerType(LoadInst *LI);
Ahmed Bougacha52468672015-09-11 17:08:28 +000074 bool tryExpandAtomicLoad(LoadInst *LI);
Robin Morisset6dbbbc22014-09-23 20:59:25 +000075 bool expandAtomicLoadToLL(LoadInst *LI);
76 bool expandAtomicLoadToCmpXchg(LoadInst *LI);
Philip Reames61a24ab2015-12-16 00:49:36 +000077 StoreInst *convertAtomicStoreToIntegerType(StoreInst *SI);
Robin Morisseted3d48f2014-09-03 21:29:59 +000078 bool expandAtomicStore(StoreInst *SI);
JF Bastienf14889e2015-03-04 15:47:57 +000079 bool tryExpandAtomicRMW(AtomicRMWInst *AI);
James Y Knight148a6462016-06-17 18:11:48 +000080 Value *
81 insertRMWLLSCLoop(IRBuilder<> &Builder, Type *ResultTy, Value *Addr,
82 AtomicOrdering MemOpOrder,
83 function_ref<Value *(IRBuilder<> &, Value *)> PerformOp);
84 void expandAtomicOpToLLSC(
85 Instruction *I, Type *ResultTy, Value *Addr, AtomicOrdering MemOpOrder,
Benjamin Kramerd3f4c052016-06-12 16:13:55 +000086 function_ref<Value *(IRBuilder<> &, Value *)> PerformOp);
James Y Knight148a6462016-06-17 18:11:48 +000087 void expandPartwordAtomicRMW(
88 AtomicRMWInst *I,
89 TargetLoweringBase::AtomicExpansionKind ExpansionKind);
Alex Bradbury3291f9a2018-08-17 14:03:37 +000090 AtomicRMWInst *widenPartwordAtomicRMW(AtomicRMWInst *AI);
James Y Knight148a6462016-06-17 18:11:48 +000091 void expandPartwordCmpXchg(AtomicCmpXchgInst *I);
Alex Bradbury21aea512018-09-19 10:54:22 +000092 void expandAtomicRMWToMaskedIntrinsic(AtomicRMWInst *AI);
Alex Bradbury66d9a752018-11-29 20:43:42 +000093 void expandAtomicCmpXchgToMaskedIntrinsic(AtomicCmpXchgInst *CI);
James Y Knight148a6462016-06-17 18:11:48 +000094
Philip Reames1960cfd2016-02-19 00:06:41 +000095 AtomicCmpXchgInst *convertCmpXchgToIntegerType(AtomicCmpXchgInst *CI);
James Y Knight148a6462016-06-17 18:11:48 +000096 static Value *insertRMWCmpXchgLoop(
97 IRBuilder<> &Builder, Type *ResultType, Value *Addr,
98 AtomicOrdering MemOpOrder,
99 function_ref<Value *(IRBuilder<> &, Value *)> PerformOp,
100 CreateCmpXchgInstFun CreateCmpXchg);
Alex Bradbury79518b02018-09-19 14:51:42 +0000101 bool tryExpandAtomicCmpXchg(AtomicCmpXchgInst *CI);
James Y Knight148a6462016-06-17 18:11:48 +0000102
Tim Northoverc882eb02014-04-03 11:44:58 +0000103 bool expandAtomicCmpXchg(AtomicCmpXchgInst *CI);
Fangrui Songcb0bab82018-07-16 18:51:40 +0000104 bool isIdempotentRMW(AtomicRMWInst *RMWI);
105 bool simplifyIdempotentRMW(AtomicRMWInst *RMWI);
James Y Knight19f6cce2016-04-12 20:18:48 +0000106
107 bool expandAtomicOpToLibcall(Instruction *I, unsigned Size, unsigned Align,
108 Value *PointerOperand, Value *ValueOperand,
109 Value *CASExpected, AtomicOrdering Ordering,
110 AtomicOrdering Ordering2,
111 ArrayRef<RTLIB::Libcall> Libcalls);
112 void expandAtomicLoadToLibcall(LoadInst *LI);
    void expandAtomicStoreToLibcall(StoreInst *SI);
    void expandAtomicRMWToLibcall(AtomicRMWInst *I);
    void expandAtomicCASToLibcall(AtomicCmpXchgInst *I);

    friend bool
    llvm::expandAtomicRMWToCmpXchg(AtomicRMWInst *AI,
                                   CreateCmpXchgInstFun CreateCmpXchg);
  };

} // end anonymous namespace

char AtomicExpand::ID = 0;

char &llvm::AtomicExpandID = AtomicExpand::ID;

INITIALIZE_PASS(AtomicExpand, DEBUG_TYPE, "Expand Atomic instructions",
                false, false)

FunctionPass *llvm::createAtomicExpandPass() { return new AtomicExpand(); }

// Helper functions to retrieve the size of atomic instructions.
static unsigned getAtomicOpSize(LoadInst *LI) {
  const DataLayout &DL = LI->getModule()->getDataLayout();
  return DL.getTypeStoreSize(LI->getType());
}

static unsigned getAtomicOpSize(StoreInst *SI) {
  const DataLayout &DL = SI->getModule()->getDataLayout();
  return DL.getTypeStoreSize(SI->getValueOperand()->getType());
}

static unsigned getAtomicOpSize(AtomicRMWInst *RMWI) {
  const DataLayout &DL = RMWI->getModule()->getDataLayout();
  return DL.getTypeStoreSize(RMWI->getValOperand()->getType());
}

static unsigned getAtomicOpSize(AtomicCmpXchgInst *CASI) {
  const DataLayout &DL = CASI->getModule()->getDataLayout();
  return DL.getTypeStoreSize(CASI->getCompareOperand()->getType());
}

// Helper functions to retrieve the alignment of atomic instructions.
static unsigned getAtomicOpAlign(LoadInst *LI) {
  unsigned Align = LI->getAlignment();
  // In the future, if this IR restriction is relaxed, we should
  // return DataLayout::getABITypeAlignment when there's no align
  // value.
  assert(Align != 0 && "An atomic LoadInst always has an explicit alignment");
  return Align;
}

static unsigned getAtomicOpAlign(StoreInst *SI) {
  unsigned Align = SI->getAlignment();
  // In the future, if this IR restriction is relaxed, we should
  // return DataLayout::getABITypeAlignment when there's no align
  // value.
  assert(Align != 0 && "An atomic StoreInst always has an explicit alignment");
  return Align;
}

static unsigned getAtomicOpAlign(AtomicRMWInst *RMWI) {
  // TODO(PR27168): This instruction has no alignment attribute, but unlike the
  // default alignment for load/store, the default here is to assume
  // it has NATURAL alignment, not DataLayout-specified alignment.
  const DataLayout &DL = RMWI->getModule()->getDataLayout();
  return DL.getTypeStoreSize(RMWI->getValOperand()->getType());
}

static unsigned getAtomicOpAlign(AtomicCmpXchgInst *CASI) {
  // TODO(PR27168): same comment as above.
  const DataLayout &DL = CASI->getModule()->getDataLayout();
  return DL.getTypeStoreSize(CASI->getCompareOperand()->getType());
}

// Determine if a particular atomic operation has a supported size,
// and is of appropriate alignment, to be passed through for target
// lowering. (Versus turning into a __atomic libcall.)
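// For example (sizes assumed for illustration): with
// getMaxAtomicSizeInBitsSupported() == 64, a naturally aligned atomic i32
// passes this check, while an atomic i128, or an atomic i64 aligned to only
// 4 bytes, fails it and is expanded to an __atomic_* libcall instead.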
template <typename Inst>
static bool atomicSizeSupported(const TargetLowering *TLI, Inst *I) {
  unsigned Size = getAtomicOpSize(I);
  unsigned Align = getAtomicOpAlign(I);
  return Align >= Size && Size <= TLI->getMaxAtomicSizeInBitsSupported() / 8;
}

bool AtomicExpand::runOnFunction(Function &F) {
  auto *TPC = getAnalysisIfAvailable<TargetPassConfig>();
  if (!TPC)
    return false;

  auto &TM = TPC->getTM<TargetMachine>();
  if (!TM.getSubtargetImpl(F)->enableAtomicExpand())
    return false;
  TLI = TM.getSubtargetImpl(F)->getTargetLowering();

  SmallVector<Instruction *, 1> AtomicInsts;

  // Changing control-flow while iterating through it is a bad idea, so gather a
  // list of all atomic instructions before we start.
  for (inst_iterator II = inst_begin(F), E = inst_end(F); II != E; ++II) {
    Instruction *I = &*II;
    if (I->isAtomic() && !isa<FenceInst>(I))
      AtomicInsts.push_back(I);
  }

  bool MadeChange = false;
  for (auto I : AtomicInsts) {
    auto LI = dyn_cast<LoadInst>(I);
    auto SI = dyn_cast<StoreInst>(I);
    auto RMWI = dyn_cast<AtomicRMWInst>(I);
    auto CASI = dyn_cast<AtomicCmpXchgInst>(I);
    assert((LI || SI || RMWI || CASI) && "Unknown atomic instruction");

    // If the Size/Alignment is not supported, replace with a libcall.
    if (LI) {
      if (!atomicSizeSupported(TLI, LI)) {
        expandAtomicLoadToLibcall(LI);
        MadeChange = true;
        continue;
      }
    } else if (SI) {
      if (!atomicSizeSupported(TLI, SI)) {
        expandAtomicStoreToLibcall(SI);
        MadeChange = true;
        continue;
      }
    } else if (RMWI) {
      if (!atomicSizeSupported(TLI, RMWI)) {
        expandAtomicRMWToLibcall(RMWI);
        MadeChange = true;
        continue;
      }
    } else if (CASI) {
      if (!atomicSizeSupported(TLI, CASI)) {
        expandAtomicCASToLibcall(CASI);
        MadeChange = true;
        continue;
      }
    }

    if (TLI->shouldInsertFencesForAtomic(I)) {
      auto FenceOrdering = AtomicOrdering::Monotonic;
      if (LI && isAcquireOrStronger(LI->getOrdering())) {
        FenceOrdering = LI->getOrdering();
        LI->setOrdering(AtomicOrdering::Monotonic);
      } else if (SI && isReleaseOrStronger(SI->getOrdering())) {
        FenceOrdering = SI->getOrdering();
        SI->setOrdering(AtomicOrdering::Monotonic);
      } else if (RMWI && (isReleaseOrStronger(RMWI->getOrdering()) ||
                          isAcquireOrStronger(RMWI->getOrdering()))) {
        FenceOrdering = RMWI->getOrdering();
        RMWI->setOrdering(AtomicOrdering::Monotonic);
      } else if (CASI &&
                 TLI->shouldExpandAtomicCmpXchgInIR(CASI) ==
                     TargetLoweringBase::AtomicExpansionKind::None &&
                 (isReleaseOrStronger(CASI->getSuccessOrdering()) ||
                  isAcquireOrStronger(CASI->getSuccessOrdering()))) {
        // If a compare and swap is lowered to LL/SC, we can do smarter fence
        // insertion, with a stronger one on the success path than on the
        // failure path. As a result, fence insertion is directly done by
        // expandAtomicCmpXchg in that case.
        FenceOrdering = CASI->getSuccessOrdering();
        CASI->setSuccessOrdering(AtomicOrdering::Monotonic);
        CASI->setFailureOrdering(AtomicOrdering::Monotonic);
      }

      if (FenceOrdering != AtomicOrdering::Monotonic) {
        MadeChange |= bracketInstWithFences(I, FenceOrdering);
      }
    }

    if (LI) {
      if (LI->getType()->isFloatingPointTy()) {
        // TODO: add a TLI hook to control this so that each target can
        // convert to lowering the original type one at a time.
        LI = convertAtomicLoadToIntegerType(LI);
        assert(LI->getType()->isIntegerTy() && "invariant broken");
        MadeChange = true;
      }

      MadeChange |= tryExpandAtomicLoad(LI);
    } else if (SI) {
      if (SI->getValueOperand()->getType()->isFloatingPointTy()) {
        // TODO: add a TLI hook to control this so that each target can
        // convert to lowering the original type one at a time.
        SI = convertAtomicStoreToIntegerType(SI);
        assert(SI->getValueOperand()->getType()->isIntegerTy() &&
               "invariant broken");
        MadeChange = true;
      }

      if (TLI->shouldExpandAtomicStoreInIR(SI))
        MadeChange |= expandAtomicStore(SI);
    } else if (RMWI) {
      // There are two different ways of expanding RMW instructions:
      //  - into a load if it is idempotent
      //  - into a Cmpxchg/LL-SC loop otherwise
      // We try them in that order.

      if (isIdempotentRMW(RMWI) && simplifyIdempotentRMW(RMWI)) {
        MadeChange = true;
      } else {
        unsigned MinCASSize = TLI->getMinCmpXchgSizeInBits() / 8;
        unsigned ValueSize = getAtomicOpSize(RMWI);
        AtomicRMWInst::BinOp Op = RMWI->getOperation();
        if (ValueSize < MinCASSize &&
            (Op == AtomicRMWInst::Or || Op == AtomicRMWInst::Xor ||
             Op == AtomicRMWInst::And)) {
          RMWI = widenPartwordAtomicRMW(RMWI);
          MadeChange = true;
        }

        MadeChange |= tryExpandAtomicRMW(RMWI);
      }
    } else if (CASI) {
      // TODO: when we're ready to make the change at the IR level, we can
      // extend convertCmpXchgToInteger for floating point too.
      assert(!CASI->getCompareOperand()->getType()->isFloatingPointTy() &&
             "unimplemented - floating point not legal at IR level");
      if (CASI->getCompareOperand()->getType()->isPointerTy()) {
        // TODO: add a TLI hook to control this so that each target can
        // convert to lowering the original type one at a time.
        CASI = convertCmpXchgToIntegerType(CASI);
        assert(CASI->getCompareOperand()->getType()->isIntegerTy() &&
               "invariant broken");
        MadeChange = true;
      }

      MadeChange |= tryExpandAtomicCmpXchg(CASI);
    }
  }
  return MadeChange;
}

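// Bracket the (now monotonic) instruction I with the target's fences.
// With the default TargetLoweringBase fence hooks (an assumption; targets
// commonly override these), the result is roughly:
//
//   store atomic i32 %v, i32* %p seq_cst
//     -->  fence seq_cst
//          store atomic i32 %v, i32* %p monotonic
//
//   %v = load atomic i32, i32* %p seq_cst
//     -->  %v = load atomic i32, i32* %p monotonic
//          fence acquire
//
// where runOnFunction has already weakened the ordering on the instruction.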
bool AtomicExpand::bracketInstWithFences(Instruction *I, AtomicOrdering Order) {
  IRBuilder<> Builder(I);

  auto LeadingFence = TLI->emitLeadingFence(Builder, I, Order);

  auto TrailingFence = TLI->emitTrailingFence(Builder, I, Order);
  // We have a guard here because not every atomic operation generates a
  // trailing fence.
  if (TrailingFence)
    TrailingFence->moveAfter(I);

  return (LeadingFence || TrailingFence);
}

/// Get the iX type with the same bitwidth as T.
IntegerType *AtomicExpand::getCorrespondingIntegerType(Type *T,
                                                       const DataLayout &DL) {
  EVT VT = TLI->getValueType(DL, T);
  unsigned BitWidth = VT.getStoreSizeInBits();
  assert(BitWidth == VT.getSizeInBits() && "must be a power of two");
  return IntegerType::get(T->getContext(), BitWidth);
}

/// Convert an atomic load of a non-integral type to an integer load of the
/// equivalent bitwidth. See the function comment on
/// convertAtomicStoreToIntegerType for background.
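///
/// Sketch of the rewrite (illustrative):
///
///   %v = load atomic float, float* %p seq_cst, align 4
///
/// becomes
///
///   %1 = bitcast float* %p to i32*
///   %2 = load atomic i32, i32* %1 seq_cst, align 4
///   %v = bitcast i32 %2 to float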
LoadInst *AtomicExpand::convertAtomicLoadToIntegerType(LoadInst *LI) {
  auto *M = LI->getModule();
  Type *NewTy = getCorrespondingIntegerType(LI->getType(),
                                            M->getDataLayout());

  IRBuilder<> Builder(LI);

  Value *Addr = LI->getPointerOperand();
  Type *PT = PointerType::get(NewTy,
                              Addr->getType()->getPointerAddressSpace());
  Value *NewAddr = Builder.CreateBitCast(Addr, PT);

  auto *NewLI = Builder.CreateLoad(NewAddr);
  NewLI->setAlignment(LI->getAlignment());
  NewLI->setVolatile(LI->isVolatile());
  NewLI->setAtomic(LI->getOrdering(), LI->getSyncScopeID());
  LLVM_DEBUG(dbgs() << "Replaced " << *LI << " with " << *NewLI << "\n");

  Value *NewVal = Builder.CreateBitCast(NewLI, LI->getType());
  LI->replaceAllUsesWith(NewVal);
  LI->eraseFromParent();
  return NewLI;
}

bool AtomicExpand::tryExpandAtomicLoad(LoadInst *LI) {
  switch (TLI->shouldExpandAtomicLoadInIR(LI)) {
  case TargetLoweringBase::AtomicExpansionKind::None:
    return false;
  case TargetLoweringBase::AtomicExpansionKind::LLSC:
    expandAtomicOpToLLSC(
        LI, LI->getType(), LI->getPointerOperand(), LI->getOrdering(),
        [](IRBuilder<> &Builder, Value *Loaded) { return Loaded; });
    return true;
  case TargetLoweringBase::AtomicExpansionKind::LLOnly:
    return expandAtomicLoadToLL(LI);
  case TargetLoweringBase::AtomicExpansionKind::CmpXChg:
    return expandAtomicLoadToCmpXchg(LI);
  default:
    llvm_unreachable("Unhandled case in tryExpandAtomicLoad");
  }
}

bool AtomicExpand::expandAtomicLoadToLL(LoadInst *LI) {
  IRBuilder<> Builder(LI);

  // On some architectures, load-linked instructions are atomic for larger
  // sizes than normal loads. For example, the only 64-bit load guaranteed
  // to be single-copy atomic by ARM is an ldrexd (A3.5.3).
  Value *Val =
      TLI->emitLoadLinked(Builder, LI->getPointerOperand(), LI->getOrdering());
  TLI->emitAtomicCmpXchgNoStoreLLBalance(Builder);

  LI->replaceAllUsesWith(Val);
  LI->eraseFromParent();

  return true;
}

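// Expand an atomic load by issuing a cmpxchg with a dummy (zero) expected
// and new value: whether the compare succeeds or fails, the returned first
// element is the value that was in memory. Sketch for an i64 load
// (illustrative):
//
//   %pair = cmpxchg i64* %p, i64 0, i64 0 seq_cst seq_cst
//   %loaded = extractvalue { i64, i1 } %pair, 0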
bool AtomicExpand::expandAtomicLoadToCmpXchg(LoadInst *LI) {
  IRBuilder<> Builder(LI);
  AtomicOrdering Order = LI->getOrdering();
  Value *Addr = LI->getPointerOperand();
  Type *Ty = cast<PointerType>(Addr->getType())->getElementType();
  Constant *DummyVal = Constant::getNullValue(Ty);

  Value *Pair = Builder.CreateAtomicCmpXchg(
      Addr, DummyVal, DummyVal, Order,
      AtomicCmpXchgInst::getStrongestFailureOrdering(Order));
  Value *Loaded = Builder.CreateExtractValue(Pair, 0, "loaded");

  LI->replaceAllUsesWith(Loaded);
  LI->eraseFromParent();

  return true;
}

/// Convert an atomic store of a non-integral type to an integer store of the
/// equivalent bitwidth. We used to not support floating point or vector
/// atomics in the IR at all. The backends learned to deal with the bitcast
/// idiom because that was the only way of expressing the notion of an atomic
/// float or vector store. The long term plan is to teach each backend to
/// instruction select from the original atomic store, but as a migration
/// mechanism, we convert back to the old format which the backends understand.
/// Each backend will need individual work to recognize the new format.
StoreInst *AtomicExpand::convertAtomicStoreToIntegerType(StoreInst *SI) {
  IRBuilder<> Builder(SI);
  auto *M = SI->getModule();
  Type *NewTy = getCorrespondingIntegerType(SI->getValueOperand()->getType(),
                                            M->getDataLayout());
  Value *NewVal = Builder.CreateBitCast(SI->getValueOperand(), NewTy);

  Value *Addr = SI->getPointerOperand();
  Type *PT = PointerType::get(NewTy,
                              Addr->getType()->getPointerAddressSpace());
  Value *NewAddr = Builder.CreateBitCast(Addr, PT);

  StoreInst *NewSI = Builder.CreateStore(NewVal, NewAddr);
  NewSI->setAlignment(SI->getAlignment());
  NewSI->setVolatile(SI->isVolatile());
  NewSI->setAtomic(SI->getOrdering(), SI->getSyncScopeID());
  LLVM_DEBUG(dbgs() << "Replaced " << *SI << " with " << *NewSI << "\n");
  SI->eraseFromParent();
  return NewSI;
}

bool AtomicExpand::expandAtomicStore(StoreInst *SI) {
  // This function is only called on atomic stores that are too large to be
  // atomic if implemented as a native store. So we replace them by an
  // atomic swap, that can be implemented for example as a ldrex/strex on ARM
  // or lock cmpxchg8/16b on X86, as these are atomic for larger sizes.
  // It is the responsibility of the target to only signal expansion via
  // shouldExpandAtomicRMW in cases where this is required and possible.
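  //
  // Sketch of the rewrite (illustrative):
  //
  //   store atomic i64 %v, i64* %p release, align 8
  //     -->
  //   atomicrmw xchg i64* %p, i64 %v release   ; result unused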
  IRBuilder<> Builder(SI);
  AtomicRMWInst *AI =
      Builder.CreateAtomicRMW(AtomicRMWInst::Xchg, SI->getPointerOperand(),
                              SI->getValueOperand(), SI->getOrdering());
  SI->eraseFromParent();

  // Now we have an appropriate swap instruction, lower it as usual.
  return tryExpandAtomicRMW(AI);
}

static void createCmpXchgInstFun(IRBuilder<> &Builder, Value *Addr,
                                 Value *Loaded, Value *NewVal,
                                 AtomicOrdering MemOpOrder,
                                 Value *&Success, Value *&NewLoaded) {
  Type *OrigTy = NewVal->getType();

  // This code can go away when cmpxchg supports FP types.
  bool NeedBitcast = OrigTy->isFloatingPointTy();
  if (NeedBitcast) {
    IntegerType *IntTy = Builder.getIntNTy(OrigTy->getPrimitiveSizeInBits());
    unsigned AS = Addr->getType()->getPointerAddressSpace();
    Addr = Builder.CreateBitCast(Addr, IntTy->getPointerTo(AS));
    NewVal = Builder.CreateBitCast(NewVal, IntTy);
    Loaded = Builder.CreateBitCast(Loaded, IntTy);
  }

  Value *Pair = Builder.CreateAtomicCmpXchg(
      Addr, Loaded, NewVal, MemOpOrder,
      AtomicCmpXchgInst::getStrongestFailureOrdering(MemOpOrder));
  Success = Builder.CreateExtractValue(Pair, 1, "success");
  NewLoaded = Builder.CreateExtractValue(Pair, 0, "newloaded");

  if (NeedBitcast)
    NewLoaded = Builder.CreateBitCast(NewLoaded, OrigTy);
}

/// Emit IR to implement the given atomicrmw operation on values in registers,
/// returning the new value.
static Value *performAtomicOp(AtomicRMWInst::BinOp Op, IRBuilder<> &Builder,
                              Value *Loaded, Value *Inc) {
  Value *NewVal;
  switch (Op) {
  case AtomicRMWInst::Xchg:
    return Inc;
  case AtomicRMWInst::Add:
    return Builder.CreateAdd(Loaded, Inc, "new");
  case AtomicRMWInst::Sub:
    return Builder.CreateSub(Loaded, Inc, "new");
  case AtomicRMWInst::And:
    return Builder.CreateAnd(Loaded, Inc, "new");
  case AtomicRMWInst::Nand:
    return Builder.CreateNot(Builder.CreateAnd(Loaded, Inc), "new");
  case AtomicRMWInst::Or:
    return Builder.CreateOr(Loaded, Inc, "new");
  case AtomicRMWInst::Xor:
    return Builder.CreateXor(Loaded, Inc, "new");
  case AtomicRMWInst::Max:
    NewVal = Builder.CreateICmpSGT(Loaded, Inc);
    return Builder.CreateSelect(NewVal, Loaded, Inc, "new");
  case AtomicRMWInst::Min:
    NewVal = Builder.CreateICmpSLE(Loaded, Inc);
    return Builder.CreateSelect(NewVal, Loaded, Inc, "new");
  case AtomicRMWInst::UMax:
    NewVal = Builder.CreateICmpUGT(Loaded, Inc);
    return Builder.CreateSelect(NewVal, Loaded, Inc, "new");
  case AtomicRMWInst::UMin:
    NewVal = Builder.CreateICmpULE(Loaded, Inc);
    return Builder.CreateSelect(NewVal, Loaded, Inc, "new");
  default:
    llvm_unreachable("Unknown atomic op");
  }
}

bool AtomicExpand::tryExpandAtomicRMW(AtomicRMWInst *AI) {
  switch (TLI->shouldExpandAtomicRMWInIR(AI)) {
  case TargetLoweringBase::AtomicExpansionKind::None:
    return false;
  case TargetLoweringBase::AtomicExpansionKind::LLSC: {
    unsigned MinCASSize = TLI->getMinCmpXchgSizeInBits() / 8;
    unsigned ValueSize = getAtomicOpSize(AI);
    if (ValueSize < MinCASSize) {
      llvm_unreachable(
          "MinCmpXchgSizeInBits not yet supported for LL/SC architectures.");
    } else {
      auto PerformOp = [&](IRBuilder<> &Builder, Value *Loaded) {
        return performAtomicOp(AI->getOperation(), Builder, Loaded,
                               AI->getValOperand());
      };
      expandAtomicOpToLLSC(AI, AI->getType(), AI->getPointerOperand(),
                           AI->getOrdering(), PerformOp);
    }
    return true;
  }
  case TargetLoweringBase::AtomicExpansionKind::CmpXChg: {
    unsigned MinCASSize = TLI->getMinCmpXchgSizeInBits() / 8;
    unsigned ValueSize = getAtomicOpSize(AI);
    if (ValueSize < MinCASSize) {
      expandPartwordAtomicRMW(AI,
                              TargetLoweringBase::AtomicExpansionKind::CmpXChg);
    } else {
      expandAtomicRMWToCmpXchg(AI, createCmpXchgInstFun);
    }
    return true;
  }
  case TargetLoweringBase::AtomicExpansionKind::MaskedIntrinsic: {
    expandAtomicRMWToMaskedIntrinsic(AI);
    return true;
  }
  default:
    llvm_unreachable("Unhandled case in tryExpandAtomicRMW");
  }
}

namespace {

/// Result values from createMaskInstrs helper.
struct PartwordMaskValues {
  Type *WordType;
  Type *ValueType;
  Value *AlignedAddr;
  Value *ShiftAmt;
  Value *Mask;
  Value *Inv_Mask;
};

} // end anonymous namespace

/// This is a helper function which builds instructions to provide
/// values necessary for partword atomic operations. It takes an
/// incoming address, Addr, and ValueType, and constructs the address,
/// shift-amounts and masks needed to work with a larger value of size
/// WordSize.
///
/// AlignedAddr: Addr rounded down to a multiple of WordSize
///
/// ShiftAmt: Number of bits to right-shift a WordSize value loaded
///           from AlignedAddr for it to have the same value as if
///           ValueType was loaded from Addr.
///
/// Mask: Value to mask with the value loaded from AlignedAddr to
///       include only the part that would've been loaded from Addr.
///
/// Inv_Mask: The inverse of Mask.
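///
/// Worked example (illustrative, little-endian, WordSize == 4): for an i8
/// located at byte offset 2 within its containing word,
///   AlignedAddr = Addr & ~3
///   ShiftAmt    = 16
///   Mask        = 0x00FF0000
///   Inv_Mask    = 0xFF00FFFF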
static PartwordMaskValues createMaskInstrs(IRBuilder<> &Builder, Instruction *I,
                                           Type *ValueType, Value *Addr,
                                           unsigned WordSize) {
  PartwordMaskValues Ret;

  BasicBlock *BB = I->getParent();
  Function *F = BB->getParent();
  Module *M = I->getModule();

  LLVMContext &Ctx = F->getContext();
  const DataLayout &DL = M->getDataLayout();

  unsigned ValueSize = DL.getTypeStoreSize(ValueType);

  assert(ValueSize < WordSize);

  Ret.ValueType = ValueType;
  Ret.WordType = Type::getIntNTy(Ctx, WordSize * 8);

  Type *WordPtrType =
      Ret.WordType->getPointerTo(Addr->getType()->getPointerAddressSpace());

  Value *AddrInt = Builder.CreatePtrToInt(Addr, DL.getIntPtrType(Ctx));
  Ret.AlignedAddr = Builder.CreateIntToPtr(
      Builder.CreateAnd(AddrInt, ~(uint64_t)(WordSize - 1)), WordPtrType,
      "AlignedAddr");

  Value *PtrLSB = Builder.CreateAnd(AddrInt, WordSize - 1, "PtrLSB");
  if (DL.isLittleEndian()) {
    // turn bytes into bits
    Ret.ShiftAmt = Builder.CreateShl(PtrLSB, 3);
  } else {
    // turn bytes into bits, and count from the other side.
    Ret.ShiftAmt =
        Builder.CreateShl(Builder.CreateXor(PtrLSB, WordSize - ValueSize), 3);
  }

  Ret.ShiftAmt = Builder.CreateTrunc(Ret.ShiftAmt, Ret.WordType, "ShiftAmt");
  Ret.Mask = Builder.CreateShl(
      ConstantInt::get(Ret.WordType, (1 << ValueSize * 8) - 1), Ret.ShiftAmt,
      "Mask");
  Ret.Inv_Mask = Builder.CreateNot(Ret.Mask, "Inv_Mask");

  return Ret;
}

/// Emit IR to implement a masked version of a given atomicrmw
/// operation. (That is, only the bits under the Mask should be
/// affected by the operation.)
static Value *performMaskedAtomicOp(AtomicRMWInst::BinOp Op,
                                    IRBuilder<> &Builder, Value *Loaded,
                                    Value *Shifted_Inc, Value *Inc,
                                    const PartwordMaskValues &PMV) {
  // TODO: update to use
  // https://graphics.stanford.edu/~seander/bithacks.html#MaskedMerge in order
  // to merge bits from two values without requiring PMV.Inv_Mask.
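  // (The masked-merge trick computes the same result as the and/or pairs
  // below with one fewer constant: Final = Loaded ^ ((Loaded ^ New) & Mask).)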
  switch (Op) {
  case AtomicRMWInst::Xchg: {
    Value *Loaded_MaskOut = Builder.CreateAnd(Loaded, PMV.Inv_Mask);
    Value *FinalVal = Builder.CreateOr(Loaded_MaskOut, Shifted_Inc);
    return FinalVal;
  }
  case AtomicRMWInst::Or:
  case AtomicRMWInst::Xor:
  case AtomicRMWInst::And:
    llvm_unreachable("Or/Xor/And handled by widenPartwordAtomicRMW");
  case AtomicRMWInst::Add:
  case AtomicRMWInst::Sub:
  case AtomicRMWInst::Nand: {
    // The other arithmetic ops need to be masked into place.
    Value *NewVal = performAtomicOp(Op, Builder, Loaded, Shifted_Inc);
    Value *NewVal_Masked = Builder.CreateAnd(NewVal, PMV.Mask);
    Value *Loaded_MaskOut = Builder.CreateAnd(Loaded, PMV.Inv_Mask);
    Value *FinalVal = Builder.CreateOr(Loaded_MaskOut, NewVal_Masked);
    return FinalVal;
  }
  case AtomicRMWInst::Max:
  case AtomicRMWInst::Min:
  case AtomicRMWInst::UMax:
  case AtomicRMWInst::UMin: {
    // Finally, comparison ops will operate on the full value, so
    // truncate down to the original size, and expand out again after
    // doing the operation.
    Value *Loaded_Shiftdown = Builder.CreateTrunc(
        Builder.CreateLShr(Loaded, PMV.ShiftAmt), PMV.ValueType);
    Value *NewVal = performAtomicOp(Op, Builder, Loaded_Shiftdown, Inc);
    Value *NewVal_Shiftup = Builder.CreateShl(
        Builder.CreateZExt(NewVal, PMV.WordType), PMV.ShiftAmt);
    Value *Loaded_MaskOut = Builder.CreateAnd(Loaded, PMV.Inv_Mask);
    Value *FinalVal = Builder.CreateOr(Loaded_MaskOut, NewVal_Shiftup);
    return FinalVal;
  }
  default:
    llvm_unreachable("Unknown atomic op");
  }
}

/// Expand a sub-word atomicrmw operation into an appropriate
/// word-sized operation.
///
/// It will create an LL/SC or cmpxchg loop, as appropriate, the same
/// way as a typical atomicrmw expansion. The only difference here is
/// that the operation inside of the loop must operate only upon a
/// part of the value.
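///
/// Sketch (illustrative, 32-bit minimum cmpxchg width): an
/// "atomicrmw add i8* %p, i8 %v" becomes a cmpxchg loop over the i32 word
/// containing %p in which only the byte selected by Mask is recomputed, and
/// the old byte is recovered at the end with an lshr/trunc of the old word.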
void AtomicExpand::expandPartwordAtomicRMW(
    AtomicRMWInst *AI, TargetLoweringBase::AtomicExpansionKind ExpansionKind) {
  assert(ExpansionKind == TargetLoweringBase::AtomicExpansionKind::CmpXChg);

  AtomicOrdering MemOpOrder = AI->getOrdering();

  IRBuilder<> Builder(AI);

  PartwordMaskValues PMV =
      createMaskInstrs(Builder, AI, AI->getType(), AI->getPointerOperand(),
                       TLI->getMinCmpXchgSizeInBits() / 8);

  Value *ValOperand_Shifted =
      Builder.CreateShl(Builder.CreateZExt(AI->getValOperand(), PMV.WordType),
                        PMV.ShiftAmt, "ValOperand_Shifted");

  auto PerformPartwordOp = [&](IRBuilder<> &Builder, Value *Loaded) {
    return performMaskedAtomicOp(AI->getOperation(), Builder, Loaded,
                                 ValOperand_Shifted, AI->getValOperand(), PMV);
  };

  // TODO: When we're ready to support LLSC conversions too, use
  // insertRMWLLSCLoop here for ExpansionKind==LLSC.
  Value *OldResult =
      insertRMWCmpXchgLoop(Builder, PMV.WordType, PMV.AlignedAddr, MemOpOrder,
                           PerformPartwordOp, createCmpXchgInstFun);
  Value *FinalOldResult = Builder.CreateTrunc(
      Builder.CreateLShr(OldResult, PMV.ShiftAmt), PMV.ValueType);
  AI->replaceAllUsesWith(FinalOldResult);
  AI->eraseFromParent();
}

// Widen the bitwise atomicrmw (or/xor/and) to the minimum supported width.
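//
// Widening or/xor is free: the value operand is zero-extended into the
// word, and zero bits leave the neighbouring bytes unchanged under those
// operations. For and, the bits outside the original value must instead be
// all-ones, which is why the operand is OR'd with Inv_Mask below.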
AtomicRMWInst *AtomicExpand::widenPartwordAtomicRMW(AtomicRMWInst *AI) {
  IRBuilder<> Builder(AI);
  AtomicRMWInst::BinOp Op = AI->getOperation();

  assert((Op == AtomicRMWInst::Or || Op == AtomicRMWInst::Xor ||
          Op == AtomicRMWInst::And) &&
         "Unable to widen operation");

  PartwordMaskValues PMV =
      createMaskInstrs(Builder, AI, AI->getType(), AI->getPointerOperand(),
                       TLI->getMinCmpXchgSizeInBits() / 8);

  Value *ValOperand_Shifted =
      Builder.CreateShl(Builder.CreateZExt(AI->getValOperand(), PMV.WordType),
                        PMV.ShiftAmt, "ValOperand_Shifted");

  Value *NewOperand;

  if (Op == AtomicRMWInst::And)
    NewOperand =
        Builder.CreateOr(PMV.Inv_Mask, ValOperand_Shifted, "AndOperand");
  else
    NewOperand = ValOperand_Shifted;

  AtomicRMWInst *NewAI = Builder.CreateAtomicRMW(Op, PMV.AlignedAddr,
                                                 NewOperand, AI->getOrdering());

  Value *FinalOldResult = Builder.CreateTrunc(
      Builder.CreateLShr(NewAI, PMV.ShiftAmt), PMV.ValueType);
  AI->replaceAllUsesWith(FinalOldResult);
  AI->eraseFromParent();
  return NewAI;
}

void AtomicExpand::expandPartwordCmpXchg(AtomicCmpXchgInst *CI) {
  // The basic idea here is that we're expanding a cmpxchg of a
  // smaller memory size up to a word-sized cmpxchg. To do this, we
  // need to add a retry-loop for strong cmpxchg, so that
  // modifications to other parts of the word don't cause a spurious
  // failure.

  // This generates code like the following:
  //     [[Setup mask values PMV.*]]
  //     %NewVal_Shifted = shl i32 %NewVal, %PMV.ShiftAmt
  //     %Cmp_Shifted = shl i32 %Cmp, %PMV.ShiftAmt
  //     %InitLoaded = load i32* %addr
  //     %InitLoaded_MaskOut = and i32 %InitLoaded, %PMV.Inv_Mask
  //     br partword.cmpxchg.loop
  // partword.cmpxchg.loop:
  //     %Loaded_MaskOut = phi i32 [ %InitLoaded_MaskOut, %entry ],
  //        [ %OldVal_MaskOut, %partword.cmpxchg.failure ]
  //     %FullWord_NewVal = or i32 %Loaded_MaskOut, %NewVal_Shifted
  //     %FullWord_Cmp = or i32 %Loaded_MaskOut, %Cmp_Shifted
  //     %NewCI = cmpxchg i32* %PMV.AlignedAddr, i32 %FullWord_Cmp,
  //        i32 %FullWord_NewVal success_ordering failure_ordering
  //     %OldVal = extractvalue { i32, i1 } %NewCI, 0
  //     %Success = extractvalue { i32, i1 } %NewCI, 1
  //     br i1 %Success, label %partword.cmpxchg.end,
  //        label %partword.cmpxchg.failure
  // partword.cmpxchg.failure:
  //     %OldVal_MaskOut = and i32 %OldVal, %PMV.Inv_Mask
  //     %ShouldContinue = icmp ne i32 %Loaded_MaskOut, %OldVal_MaskOut
  //     br i1 %ShouldContinue, label %partword.cmpxchg.loop,
  //        label %partword.cmpxchg.end
  // partword.cmpxchg.end:
  //     %tmp1 = lshr i32 %OldVal, %PMV.ShiftAmt
  //     %FinalOldVal = trunc i32 %tmp1 to i8
  //     %tmp2 = insertvalue { i8, i1 } undef, i8 %FinalOldVal, 0
  //     %Res = insertvalue { i8, i1 } %tmp2, i1 %Success, 1

  Value *Addr = CI->getPointerOperand();
  Value *Cmp = CI->getCompareOperand();
  Value *NewVal = CI->getNewValOperand();

  BasicBlock *BB = CI->getParent();
  Function *F = BB->getParent();
  IRBuilder<> Builder(CI);
  LLVMContext &Ctx = Builder.getContext();

  const int WordSize = TLI->getMinCmpXchgSizeInBits() / 8;

  BasicBlock *EndBB =
      BB->splitBasicBlock(CI->getIterator(), "partword.cmpxchg.end");
  auto FailureBB =
      BasicBlock::Create(Ctx, "partword.cmpxchg.failure", F, EndBB);
  auto LoopBB = BasicBlock::Create(Ctx, "partword.cmpxchg.loop", F, FailureBB);

  // The split call above "helpfully" added a branch at the end of BB
  // (to the wrong place).
  std::prev(BB->end())->eraseFromParent();
  Builder.SetInsertPoint(BB);

  PartwordMaskValues PMV = createMaskInstrs(
      Builder, CI, CI->getCompareOperand()->getType(), Addr, WordSize);

  // Shift the incoming values over, into the right location in the word.
  Value *NewVal_Shifted =
      Builder.CreateShl(Builder.CreateZExt(NewVal, PMV.WordType), PMV.ShiftAmt);
  Value *Cmp_Shifted =
      Builder.CreateShl(Builder.CreateZExt(Cmp, PMV.WordType), PMV.ShiftAmt);

  // Load the entire current word, and mask into place the expected and new
  // values.
  LoadInst *InitLoaded = Builder.CreateLoad(PMV.WordType, PMV.AlignedAddr);
  InitLoaded->setVolatile(CI->isVolatile());
  Value *InitLoaded_MaskOut = Builder.CreateAnd(InitLoaded, PMV.Inv_Mask);
  Builder.CreateBr(LoopBB);

  // partword.cmpxchg.loop:
  Builder.SetInsertPoint(LoopBB);
  PHINode *Loaded_MaskOut = Builder.CreatePHI(PMV.WordType, 2);
  Loaded_MaskOut->addIncoming(InitLoaded_MaskOut, BB);

  // Mask/Or the expected and new values into place in the loaded word.
  Value *FullWord_NewVal = Builder.CreateOr(Loaded_MaskOut, NewVal_Shifted);
  Value *FullWord_Cmp = Builder.CreateOr(Loaded_MaskOut, Cmp_Shifted);
  AtomicCmpXchgInst *NewCI = Builder.CreateAtomicCmpXchg(
      PMV.AlignedAddr, FullWord_Cmp, FullWord_NewVal, CI->getSuccessOrdering(),
      CI->getFailureOrdering(), CI->getSyncScopeID());
  NewCI->setVolatile(CI->isVolatile());
  // When we're building a strong cmpxchg, we need a loop, so you
  // might think we could use a weak cmpxchg inside. But, using strong
  // allows the below comparison for ShouldContinue, and we're
  // expecting the underlying cmpxchg to be a machine instruction,
  // which is strong anyways.
  NewCI->setWeak(CI->isWeak());

  Value *OldVal = Builder.CreateExtractValue(NewCI, 0);
  Value *Success = Builder.CreateExtractValue(NewCI, 1);

  if (CI->isWeak())
    Builder.CreateBr(EndBB);
  else
    Builder.CreateCondBr(Success, EndBB, FailureBB);

  // partword.cmpxchg.failure:
  Builder.SetInsertPoint(FailureBB);
  // Upon failure, verify that the masked-out part of the loaded value
  // has been modified. If it didn't, abort the cmpxchg, since the
  // masked-in part must've.
  Value *OldVal_MaskOut = Builder.CreateAnd(OldVal, PMV.Inv_Mask);
  Value *ShouldContinue = Builder.CreateICmpNE(Loaded_MaskOut, OldVal_MaskOut);
  Builder.CreateCondBr(ShouldContinue, LoopBB, EndBB);

  // Add the second value to the phi from above.
  Loaded_MaskOut->addIncoming(OldVal_MaskOut, FailureBB);

  // partword.cmpxchg.end:
  Builder.SetInsertPoint(CI);

  Value *FinalOldVal = Builder.CreateTrunc(
      Builder.CreateLShr(OldVal, PMV.ShiftAmt), PMV.ValueType);
  Value *Res = UndefValue::get(CI->getType());
  Res = Builder.CreateInsertValue(Res, FinalOldVal, 0);
  Res = Builder.CreateInsertValue(Res, Success, 1);

  CI->replaceAllUsesWith(Res);
  CI->eraseFromParent();
}

void AtomicExpand::expandAtomicOpToLLSC(
    Instruction *I, Type *ResultType, Value *Addr, AtomicOrdering MemOpOrder,
    function_ref<Value *(IRBuilder<> &, Value *)> PerformOp) {
  IRBuilder<> Builder(I);
  Value *Loaded =
      insertRMWLLSCLoop(Builder, ResultType, Addr, MemOpOrder, PerformOp);

  I->replaceAllUsesWith(Loaded);
  I->eraseFromParent();
}

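// Lower a sub-word atomicrmw to one of the target's masked atomic
// intrinsics: the aligned word address, shifted value operand, mask, and
// shift amount are handed to the target hook. Which intrinsic gets called
// is entirely the target's choice via emitMaskedAtomicRMWIntrinsic; on
// RISC-V, for instance, this is expected to produce a call to one of the
// llvm.riscv.masked.atomicrmw.* intrinsics (named here as background, not
// something this function hard-codes).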
void AtomicExpand::expandAtomicRMWToMaskedIntrinsic(AtomicRMWInst *AI) {
  IRBuilder<> Builder(AI);

  PartwordMaskValues PMV =
      createMaskInstrs(Builder, AI, AI->getType(), AI->getPointerOperand(),
                       TLI->getMinCmpXchgSizeInBits() / 8);

  // The value operand must be sign-extended for signed min/max so that the
  // target's signed comparison instructions can be used. Otherwise, just
  // zero-ext.
  Instruction::CastOps CastOp = Instruction::ZExt;
  AtomicRMWInst::BinOp RMWOp = AI->getOperation();
  if (RMWOp == AtomicRMWInst::Max || RMWOp == AtomicRMWInst::Min)
    CastOp = Instruction::SExt;

  Value *ValOperand_Shifted = Builder.CreateShl(
      Builder.CreateCast(CastOp, AI->getValOperand(), PMV.WordType),
      PMV.ShiftAmt, "ValOperand_Shifted");
  Value *OldResult = TLI->emitMaskedAtomicRMWIntrinsic(
      Builder, AI, PMV.AlignedAddr, ValOperand_Shifted, PMV.Mask, PMV.ShiftAmt,
      AI->getOrdering());
  Value *FinalOldResult = Builder.CreateTrunc(
      Builder.CreateLShr(OldResult, PMV.ShiftAmt), PMV.ValueType);
  AI->replaceAllUsesWith(FinalOldResult);
  AI->eraseFromParent();
}

void AtomicExpand::expandAtomicCmpXchgToMaskedIntrinsic(AtomicCmpXchgInst *CI) {
  IRBuilder<> Builder(CI);

  PartwordMaskValues PMV = createMaskInstrs(
      Builder, CI, CI->getCompareOperand()->getType(), CI->getPointerOperand(),
      TLI->getMinCmpXchgSizeInBits() / 8);

  Value *CmpVal_Shifted = Builder.CreateShl(
      Builder.CreateZExt(CI->getCompareOperand(), PMV.WordType), PMV.ShiftAmt,
      "CmpVal_Shifted");
  Value *NewVal_Shifted = Builder.CreateShl(
      Builder.CreateZExt(CI->getNewValOperand(), PMV.WordType), PMV.ShiftAmt,
      "NewVal_Shifted");
  Value *OldVal = TLI->emitMaskedAtomicCmpXchgIntrinsic(
      Builder, CI, PMV.AlignedAddr, CmpVal_Shifted, NewVal_Shifted, PMV.Mask,
      CI->getSuccessOrdering());
  Value *FinalOldVal = Builder.CreateTrunc(
      Builder.CreateLShr(OldVal, PMV.ShiftAmt), PMV.ValueType);

  Value *Res = UndefValue::get(CI->getType());
  Res = Builder.CreateInsertValue(Res, FinalOldVal, 0);
  Value *Success = Builder.CreateICmpEQ(
      CmpVal_Shifted, Builder.CreateAnd(OldVal, PMV.Mask), "Success");
  Res = Builder.CreateInsertValue(Res, Success, 1);

  CI->replaceAllUsesWith(Res);
  CI->eraseFromParent();
}

Value *AtomicExpand::insertRMWLLSCLoop(
    IRBuilder<> &Builder, Type *ResultTy, Value *Addr,
    AtomicOrdering MemOpOrder,
    function_ref<Value *(IRBuilder<> &, Value *)> PerformOp) {
  LLVMContext &Ctx = Builder.getContext();
  BasicBlock *BB = Builder.GetInsertBlock();
  Function *F = BB->getParent();

  // Given: atomicrmw some_op iN* %addr, iN %incr ordering
  //
  // The standard expansion we produce is:
  //     [...]
  // atomicrmw.start:
  //     %loaded = @load.linked(%addr)
  //     %new = some_op iN %loaded, %incr
  //     %stored = @store_conditional(%new, %addr)
  //     %try_again = icmp i32 ne %stored, 0
  //     br i1 %try_again, label %loop, label %atomicrmw.end
  // atomicrmw.end:
  //     [...]
  BasicBlock *ExitBB =
      BB->splitBasicBlock(Builder.GetInsertPoint(), "atomicrmw.end");
  BasicBlock *LoopBB = BasicBlock::Create(Ctx, "atomicrmw.start", F, ExitBB);

  // The split call above "helpfully" added a branch at the end of BB (to the
  // wrong place).
  std::prev(BB->end())->eraseFromParent();
  Builder.SetInsertPoint(BB);
  Builder.CreateBr(LoopBB);

  // Start the main loop block now that we've taken care of the preliminaries.
  Builder.SetInsertPoint(LoopBB);
  Value *Loaded = TLI->emitLoadLinked(Builder, Addr, MemOpOrder);

  Value *NewVal = PerformOp(Builder, Loaded);

  Value *StoreSuccess =
      TLI->emitStoreConditional(Builder, NewVal, Addr, MemOpOrder);
  Value *TryAgain = Builder.CreateICmpNE(
      StoreSuccess, ConstantInt::get(IntegerType::get(Ctx, 32), 0), "tryagain");
  Builder.CreateCondBr(TryAgain, LoopBB, ExitBB);

  Builder.SetInsertPoint(ExitBB, ExitBB->begin());
  return Loaded;
}

/// Convert an atomic cmpxchg of a non-integral type to an integer cmpxchg of
/// the equivalent bitwidth. We used to not support pointer cmpxchg in the
/// IR. As a migration step, we convert back to what used to be the standard
/// way to represent a pointer cmpxchg so that we can update backends one by
/// one.
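///
/// Sketch of the rewrite (illustrative, 64-bit pointers):
///
///   cmpxchg i8** %p, i8* %cmp, i8* %new seq_cst seq_cst
///
/// becomes a cmpxchg of i64, with ptrtoint applied to %cmp and %new on the
/// way in and inttoptr applied to the loaded result on the way out.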
AtomicCmpXchgInst *AtomicExpand::convertCmpXchgToIntegerType(AtomicCmpXchgInst *CI) {
  auto *M = CI->getModule();
  Type *NewTy = getCorrespondingIntegerType(CI->getCompareOperand()->getType(),
                                            M->getDataLayout());

  IRBuilder<> Builder(CI);

  Value *Addr = CI->getPointerOperand();
  Type *PT = PointerType::get(NewTy,
                              Addr->getType()->getPointerAddressSpace());
  Value *NewAddr = Builder.CreateBitCast(Addr, PT);

  Value *NewCmp = Builder.CreatePtrToInt(CI->getCompareOperand(), NewTy);
  Value *NewNewVal = Builder.CreatePtrToInt(CI->getNewValOperand(), NewTy);

  auto *NewCI = Builder.CreateAtomicCmpXchg(NewAddr, NewCmp, NewNewVal,
                                            CI->getSuccessOrdering(),
                                            CI->getFailureOrdering(),
                                            CI->getSyncScopeID());
  NewCI->setVolatile(CI->isVolatile());
  NewCI->setWeak(CI->isWeak());
  LLVM_DEBUG(dbgs() << "Replaced " << *CI << " with " << *NewCI << "\n");

  Value *OldVal = Builder.CreateExtractValue(NewCI, 0);
  Value *Succ = Builder.CreateExtractValue(NewCI, 1);

  OldVal = Builder.CreateIntToPtr(OldVal, CI->getCompareOperand()->getType());

  Value *Res = UndefValue::get(CI->getType());
  Res = Builder.CreateInsertValue(Res, OldVal, 0);
  Res = Builder.CreateInsertValue(Res, Succ, 1);

  CI->replaceAllUsesWith(Res);
  CI->eraseFromParent();
  return NewCI;
}

bool AtomicExpand::expandAtomicCmpXchg(AtomicCmpXchgInst *CI) {
  AtomicOrdering SuccessOrder = CI->getSuccessOrdering();
  AtomicOrdering FailureOrder = CI->getFailureOrdering();
  Value *Addr = CI->getPointerOperand();
  BasicBlock *BB = CI->getParent();
  Function *F = BB->getParent();
  LLVMContext &Ctx = F->getContext();
  // If shouldInsertFencesForAtomic() returns true, then the target does not
  // want to deal with memory orders, and emitLeading/TrailingFence should take
  // care of everything. Otherwise, emitLeading/TrailingFence are no-op and we
  // should preserve the ordering.
  bool ShouldInsertFencesForAtomic = TLI->shouldInsertFencesForAtomic(CI);
  AtomicOrdering MemOpOrder =
      ShouldInsertFencesForAtomic ? AtomicOrdering::Monotonic : SuccessOrder;

  // In implementations which use a barrier to achieve release semantics, we can
  // delay emitting this barrier until we know a store is actually going to be
  // attempted. The cost of this delay is that we need 2 copies of the block
  // emitting the load-linked, affecting code size.
  //
  // Ideally, this logic would be unconditional except for the minsize check
  // since in other cases the extra blocks naturally collapse down to the
  // minimal loop. Unfortunately, this puts too much stress on later
  // optimisations so we avoid emitting the extra logic in those cases too.
  bool HasReleasedLoadBB = !CI->isWeak() && ShouldInsertFencesForAtomic &&
                           SuccessOrder != AtomicOrdering::Monotonic &&
                           SuccessOrder != AtomicOrdering::Acquire &&
                           !F->optForMinSize();

  // There's no overhead for sinking the release barrier in a weak cmpxchg, so
  // do it even on minsize.
  bool UseUnconditionalReleaseBarrier = F->optForMinSize() && !CI->isWeak();

Tim Northoverc882eb02014-04-03 11:44:58 +00001113 // Given: cmpxchg some_op iN* %addr, iN %desired, iN %new success_ord fail_ord
1114 //
Tim Northover70450c52014-04-03 13:06:54 +00001115 // The full expansion we produce is:
Tim Northoverc882eb02014-04-03 11:44:58 +00001116 // [...]
Tim Northoverc882eb02014-04-03 11:44:58 +00001117 // cmpxchg.start:
Tim Northoverd32f8e62016-02-22 20:55:50 +00001118 // %unreleasedload = @load.linked(%addr)
1119 // %should_store = icmp eq %unreleasedload, %desired
1120 // br i1 %should_store, label %cmpxchg.fencedstore,
Ahmed Bougacha07a844d2015-09-22 17:21:44 +00001121 // label %cmpxchg.nostore
Tim Northoverd32f8e62016-02-22 20:55:50 +00001122 // cmpxchg.fencedstore:
 1123 // fence?
 1124 // br label %cmpxchg.trystore
Tim Northoverc882eb02014-04-03 11:44:58 +00001125 // cmpxchg.trystore:
Tim Northoverd32f8e62016-02-22 20:55:50 +00001126 // %loaded.trystore = phi [%unreleasedload, %cmpxchg.fencedstore],
1127 // [%releasedload, %cmpxchg.releasedload]
Tim Northoverc882eb02014-04-03 11:44:58 +00001128 // %stored = @store_conditional(%new, %addr)
Tim Northover20b9f732014-06-13 16:45:52 +00001129 // %success = icmp eq i32 %stored, 0
Tim Northoverd32f8e62016-02-22 20:55:50 +00001130 // br i1 %success, label %cmpxchg.success,
1131 // label %cmpxchg.releasedload/%cmpxchg.failure
1132 // cmpxchg.releasedload:
1133 // %releasedload = @load.linked(%addr)
1134 // %should_store = icmp eq %releasedload, %desired
1135 // br i1 %should_store, label %cmpxchg.trystore,
1136 // label %cmpxchg.failure
Tim Northover20b9f732014-06-13 16:45:52 +00001137 // cmpxchg.success:
1138 // fence?
1139 // br label %cmpxchg.end
Ahmed Bougacha07a844d2015-09-22 17:21:44 +00001140 // cmpxchg.nostore:
Tim Northoverd32f8e62016-02-22 20:55:50 +00001141 // %loaded.nostore = phi [%unreleasedload, %cmpxchg.start],
1142 // [%releasedload,
 1143 // %cmpxchg.releasedload]
Ahmed Bougacha07a844d2015-09-22 17:21:44 +00001144 // @load_linked_fail_balance()?
1145 // br label %cmpxchg.failure
Tim Northover20b9f732014-06-13 16:45:52 +00001146 // cmpxchg.failure:
Tim Northoverc882eb02014-04-03 11:44:58 +00001147 // fence?
Tim Northover70450c52014-04-03 13:06:54 +00001148 // br label %cmpxchg.end
1149 // cmpxchg.end:
Tim Northoverd32f8e62016-02-22 20:55:50 +00001150 // %loaded = phi [%loaded.nostore, %cmpxchg.failure],
 1151 // [%loaded.trystore, %cmpxchg.success]
Tim Northover20b9f732014-06-13 16:45:52 +00001152 // %success = phi i1 [true, %cmpxchg.success], [false, %cmpxchg.failure]
1153 // %restmp = insertvalue { iN, i1 } undef, iN %loaded, 0
1154 // %res = insertvalue { iN, i1 } %restmp, i1 %success, 1
Tim Northoverc882eb02014-04-03 11:44:58 +00001155 // [...]
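  //
  // As a concrete (illustrative) instance: with the default fence hooks in
  // TargetLoweringBase, a strong seq_cst/seq_cst cmpxchg on a fence-inserting
  // LL/SC target brackets the monotonic LL/SC loop roughly as
  //     fence release                          ; leading, in cmpxchg.fencedstore
  //     %unreleasedload = @load.linked(%addr)  ; MemOpOrder == monotonic
  //     ...
  //     fence acquire                          ; trailing, in success/failure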
Duncan P. N. Exon Smith8f11e1a2015-10-09 16:54:49 +00001156 BasicBlock *ExitBB = BB->splitBasicBlock(CI->getIterator(), "cmpxchg.end");
Tim Northover20b9f732014-06-13 16:45:52 +00001157 auto FailureBB = BasicBlock::Create(Ctx, "cmpxchg.failure", F, ExitBB);
Ahmed Bougacha07a844d2015-09-22 17:21:44 +00001158 auto NoStoreBB = BasicBlock::Create(Ctx, "cmpxchg.nostore", F, FailureBB);
1159 auto SuccessBB = BasicBlock::Create(Ctx, "cmpxchg.success", F, NoStoreBB);
Tim Northoverd32f8e62016-02-22 20:55:50 +00001160 auto ReleasedLoadBB =
1161 BasicBlock::Create(Ctx, "cmpxchg.releasedload", F, SuccessBB);
1162 auto TryStoreBB =
1163 BasicBlock::Create(Ctx, "cmpxchg.trystore", F, ReleasedLoadBB);
1164 auto ReleasingStoreBB =
1165 BasicBlock::Create(Ctx, "cmpxchg.fencedstore", F, TryStoreBB);
1166 auto StartBB = BasicBlock::Create(Ctx, "cmpxchg.start", F, ReleasingStoreBB);
Tim Northoverc882eb02014-04-03 11:44:58 +00001167
1168 // This grabs the DebugLoc from CI
1169 IRBuilder<> Builder(CI);
1170
1171 // The split call above "helpfully" added a branch at the end of BB (to the
1172 // wrong place), but we might want a fence too. It's easiest to just remove
1173 // the branch entirely.
1174 std::prev(BB->end())->eraseFromParent();
1175 Builder.SetInsertPoint(BB);
James Y Knightf44fc522016-03-16 22:12:04 +00001176 if (ShouldInsertFencesForAtomic && UseUnconditionalReleaseBarrier)
Tim Shen04de70d2017-05-09 15:27:17 +00001177 TLI->emitLeadingFence(Builder, CI, SuccessOrder);
Tim Northoverd32f8e62016-02-22 20:55:50 +00001178 Builder.CreateBr(StartBB);
Tim Northoverc882eb02014-04-03 11:44:58 +00001179
1180 // Start the main loop block now that we've taken care of the preliminaries.
Tim Northoverd32f8e62016-02-22 20:55:50 +00001181 Builder.SetInsertPoint(StartBB);
1182 Value *UnreleasedLoad = TLI->emitLoadLinked(Builder, Addr, MemOpOrder);
1183 Value *ShouldStore = Builder.CreateICmpEQ(
1184 UnreleasedLoad, CI->getCompareOperand(), "should_store");
Tim Northover70450c52014-04-03 13:06:54 +00001185
Eric Christopher572e03a2015-06-19 01:53:21 +00001186 // If the cmpxchg doesn't actually need any ordering when it fails, we can
Tim Northover70450c52014-04-03 13:06:54 +00001187 // jump straight past that fence instruction (if it exists).
Tim Northoverd32f8e62016-02-22 20:55:50 +00001188 Builder.CreateCondBr(ShouldStore, ReleasingStoreBB, NoStoreBB);
1189
1190 Builder.SetInsertPoint(ReleasingStoreBB);
James Y Knightf44fc522016-03-16 22:12:04 +00001191 if (ShouldInsertFencesForAtomic && !UseUnconditionalReleaseBarrier)
Tim Shen04de70d2017-05-09 15:27:17 +00001192 TLI->emitLeadingFence(Builder, CI, SuccessOrder);
Tim Northoverd32f8e62016-02-22 20:55:50 +00001193 Builder.CreateBr(TryStoreBB);
Tim Northoverc882eb02014-04-03 11:44:58 +00001194
1195 Builder.SetInsertPoint(TryStoreBB);
Robin Morisseta47cb412014-09-03 21:01:03 +00001196 Value *StoreSuccess = TLI->emitStoreConditional(
1197 Builder, CI->getNewValOperand(), Addr, MemOpOrder);
Tim Northoverd039abd2014-06-13 16:45:36 +00001198 StoreSuccess = Builder.CreateICmpEQ(
Tim Northoverc882eb02014-04-03 11:44:58 +00001199 StoreSuccess, ConstantInt::get(Type::getInt32Ty(Ctx), 0), "success");
Tim Northoverd32f8e62016-02-22 20:55:50 +00001200 BasicBlock *RetryBB = HasReleasedLoadBB ? ReleasedLoadBB : StartBB;
Tim Northover20b9f732014-06-13 16:45:52 +00001201 Builder.CreateCondBr(StoreSuccess, SuccessBB,
Tim Northoverd32f8e62016-02-22 20:55:50 +00001202 CI->isWeak() ? FailureBB : RetryBB);
Tim Northoverc882eb02014-04-03 11:44:58 +00001203
Tim Northoverd32f8e62016-02-22 20:55:50 +00001204 Builder.SetInsertPoint(ReleasedLoadBB);
1205 Value *SecondLoad;
1206 if (HasReleasedLoadBB) {
1207 SecondLoad = TLI->emitLoadLinked(Builder, Addr, MemOpOrder);
1208 ShouldStore = Builder.CreateICmpEQ(SecondLoad, CI->getCompareOperand(),
1209 "should_store");
1210
1211 // If the cmpxchg doesn't actually need any ordering when it fails, we can
1212 // jump straight past that fence instruction (if it exists).
1213 Builder.CreateCondBr(ShouldStore, TryStoreBB, NoStoreBB);
1214 } else
1215 Builder.CreateUnreachable();
1216
 1217 // If necessary, emit a trailing fence to keep later instructions from
 1218 // being reordered before the atomic operation.
Tim Northover20b9f732014-06-13 16:45:52 +00001219 Builder.SetInsertPoint(SuccessBB);
James Y Knightf44fc522016-03-16 22:12:04 +00001220 if (ShouldInsertFencesForAtomic)
Tim Shen04de70d2017-05-09 15:27:17 +00001221 TLI->emitTrailingFence(Builder, CI, SuccessOrder);
Tim Northover70450c52014-04-03 13:06:54 +00001222 Builder.CreateBr(ExitBB);
Tim Northoverc882eb02014-04-03 11:44:58 +00001223
Ahmed Bougacha07a844d2015-09-22 17:21:44 +00001224 Builder.SetInsertPoint(NoStoreBB);
1225 // In the failing case, where we don't execute the store-conditional, the
1226 // target might want to balance out the load-linked with a dedicated
1227 // instruction (e.g., on ARM, clearing the exclusive monitor).
1228 TLI->emitAtomicCmpXchgNoStoreLLBalance(Builder);
1229 Builder.CreateBr(FailureBB);
1230
Tim Northover20b9f732014-06-13 16:45:52 +00001231 Builder.SetInsertPoint(FailureBB);
James Y Knightf44fc522016-03-16 22:12:04 +00001232 if (ShouldInsertFencesForAtomic)
Tim Shen04de70d2017-05-09 15:27:17 +00001233 TLI->emitTrailingFence(Builder, CI, FailureOrder);
Tim Northover20b9f732014-06-13 16:45:52 +00001234 Builder.CreateBr(ExitBB);
1235
Tim Northoverb4ddc082014-05-30 10:09:59 +00001236 // Finally, we have control-flow based knowledge of whether the cmpxchg
1237 // succeeded or not. We expose this to later passes by converting any
Tim Northoverd32f8e62016-02-22 20:55:50 +00001238 // subsequent "icmp eq/ne %loaded, %oldval" into a use of an appropriate
1239 // PHI.
Tim Northover20b9f732014-06-13 16:45:52 +00001240 Builder.SetInsertPoint(ExitBB, ExitBB->begin());
Tim Northover420a2162014-06-13 14:24:07 +00001241 PHINode *Success = Builder.CreatePHI(Type::getInt1Ty(Ctx), 2);
1242 Success->addIncoming(ConstantInt::getTrue(Ctx), SuccessBB);
Tim Northover20b9f732014-06-13 16:45:52 +00001243 Success->addIncoming(ConstantInt::getFalse(Ctx), FailureBB);
Tim Northoverb4ddc082014-05-30 10:09:59 +00001244
Tim Northoverd32f8e62016-02-22 20:55:50 +00001245 // Set up the builder so we can create any PHIs we need.
1246 Value *Loaded;
1247 if (!HasReleasedLoadBB)
1248 Loaded = UnreleasedLoad;
1249 else {
1250 Builder.SetInsertPoint(TryStoreBB, TryStoreBB->begin());
1251 PHINode *TryStoreLoaded = Builder.CreatePHI(UnreleasedLoad->getType(), 2);
1252 TryStoreLoaded->addIncoming(UnreleasedLoad, ReleasingStoreBB);
1253 TryStoreLoaded->addIncoming(SecondLoad, ReleasedLoadBB);
1254
1255 Builder.SetInsertPoint(NoStoreBB, NoStoreBB->begin());
1256 PHINode *NoStoreLoaded = Builder.CreatePHI(UnreleasedLoad->getType(), 2);
1257 NoStoreLoaded->addIncoming(UnreleasedLoad, StartBB);
1258 NoStoreLoaded->addIncoming(SecondLoad, ReleasedLoadBB);
1259
1260 Builder.SetInsertPoint(ExitBB, ++ExitBB->begin());
1261 PHINode *ExitLoaded = Builder.CreatePHI(UnreleasedLoad->getType(), 2);
1262 ExitLoaded->addIncoming(TryStoreLoaded, SuccessBB);
1263 ExitLoaded->addIncoming(NoStoreLoaded, FailureBB);
1264
1265 Loaded = ExitLoaded;
1266 }
1267
Tim Northoverb4ddc082014-05-30 10:09:59 +00001268 // Look for any users of the cmpxchg that are just comparing the loaded value
1269 // against the desired one, and replace them with the CFG-derived version.
Tim Northover420a2162014-06-13 14:24:07 +00001270 SmallVector<ExtractValueInst *, 2> PrunedInsts;
Tim Northoverb4ddc082014-05-30 10:09:59 +00001271 for (auto User : CI->users()) {
Tim Northover420a2162014-06-13 14:24:07 +00001272 ExtractValueInst *EV = dyn_cast<ExtractValueInst>(User);
1273 if (!EV)
Tim Northoverb4ddc082014-05-30 10:09:59 +00001274 continue;
1275
Tim Northover420a2162014-06-13 14:24:07 +00001276 assert(EV->getNumIndices() == 1 && EV->getIndices()[0] <= 1 &&
1277 "weird extraction from { iN, i1 }");
Tim Northoverb4ddc082014-05-30 10:09:59 +00001278
Tim Northover420a2162014-06-13 14:24:07 +00001279 if (EV->getIndices()[0] == 0)
1280 EV->replaceAllUsesWith(Loaded);
1281 else
1282 EV->replaceAllUsesWith(Success);
1283
1284 PrunedInsts.push_back(EV);
Tim Northoverb4ddc082014-05-30 10:09:59 +00001285 }
1286
Tim Northover420a2162014-06-13 14:24:07 +00001287 // We can remove the instructions now that we're no longer iterating through them.
1288 for (auto EV : PrunedInsts)
1289 EV->eraseFromParent();
Tim Northoverc882eb02014-04-03 11:44:58 +00001290
Tim Northover420a2162014-06-13 14:24:07 +00001291 if (!CI->use_empty()) {
1292 // Some use of the full struct return that we don't understand has happened,
1293 // so we've got to reconstruct it properly.
1294 Value *Res;
1295 Res = Builder.CreateInsertValue(UndefValue::get(CI->getType()), Loaded, 0);
1296 Res = Builder.CreateInsertValue(Res, Success, 1);
1297
1298 CI->replaceAllUsesWith(Res);
1299 }
1300
1301 CI->eraseFromParent();
Tim Northoverc882eb02014-04-03 11:44:58 +00001302 return true;
1303}
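
// For a weak cmpxchg, HasReleasedLoadBB is always false: the failure edge out
// of cmpxchg.trystore goes straight to cmpxchg.failure, cmpxchg.releasedload
// is emitted as `unreachable`, and the store is attempted exactly once:
//     %stored = @store_conditional(%new, %addr)
//     %success = icmp eq i32 %stored, 0
//     br i1 %success, label %cmpxchg.success, label %cmpxchg.failure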
Robin Morisset810739d2014-09-25 17:27:43 +00001304
 1305bool AtomicExpand::isIdempotentRMW(AtomicRMWInst *RMWI) {
 1306 auto C = dyn_cast<ConstantInt>(RMWI->getValOperand());
 1307 if (!C)
 1308 return false;
 1309
 1310 AtomicRMWInst::BinOp Op = RMWI->getOperation();
 1311 switch (Op) {
1312 case AtomicRMWInst::Add:
1313 case AtomicRMWInst::Sub:
1314 case AtomicRMWInst::Or:
1315 case AtomicRMWInst::Xor:
1316 return C->isZero();
1317 case AtomicRMWInst::And:
1318 return C->isMinusOne();
 1319 // FIXME: we could also treat Min/Max/UMin/UMax as idempotent when the operand is INT_MAX/INT_MIN/UINT_MAX/0, respectively.
1320 default:
1321 return false;
1322 }
1323}
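
// For illustration, RMW operations the check above treats as idempotent (the
// value operand is an identity of the operation):
//   atomicrmw add i32* %p, i32 0 seq_cst   ; x + 0  == x
//   atomicrmw xor i32* %p, i32 0 seq_cst   ; x ^ 0  == x
//   atomicrmw and i32* %p, i32 -1 seq_cst  ; x & -1 == x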
1324
 1325bool AtomicExpand::simplifyIdempotentRMW(AtomicRMWInst *RMWI) {
Ahmed Bougacha49b531a2015-09-12 18:51:23 +00001326 if (auto ResultingLoad = TLI->lowerIdempotentRMWIntoFencedLoad(RMWI)) {
1327 tryExpandAtomicLoad(ResultingLoad);
1328 return true;
1329 }
Robin Morisset810739d2014-09-25 17:27:43 +00001330 return false;
1331}
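
// Where the target implements lowerIdempotentRMWIntoFencedLoad (X86 does, for
// example), an idempotent RMW such as `atomicrmw or i32* %p, i32 0 seq_cst`
// becomes a fence plus an atomic load; on X86 that is roughly (a sketch only,
// the exact barrier is the target's business):
//     call void @llvm.x86.sse2.mfence()
//     %v = load atomic i32, i32* %p seq_cst, align 4
// The resulting load is then handed to tryExpandAtomicLoad, as above.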
JF Bastiene8aad292015-08-03 15:29:47 +00001332
James Y Knight148a6462016-06-17 18:11:48 +00001333Value *AtomicExpand::insertRMWCmpXchgLoop(
1334 IRBuilder<> &Builder, Type *ResultTy, Value *Addr,
1335 AtomicOrdering MemOpOrder,
1336 function_ref<Value *(IRBuilder<> &, Value *)> PerformOp,
1337 CreateCmpXchgInstFun CreateCmpXchg) {
1338 LLVMContext &Ctx = Builder.getContext();
1339 BasicBlock *BB = Builder.GetInsertBlock();
JF Bastiene8aad292015-08-03 15:29:47 +00001340 Function *F = BB->getParent();
JF Bastiene8aad292015-08-03 15:29:47 +00001341
1342 // Given: atomicrmw some_op iN* %addr, iN %incr ordering
1343 //
1344 // The standard expansion we produce is:
1345 // [...]
1346 // %init_loaded = load atomic iN* %addr
1347 // br label %loop
1348 // loop:
1349 // %loaded = phi iN [ %init_loaded, %entry ], [ %new_loaded, %loop ]
1350 // %new = some_op iN %loaded, %incr
1351 // %pair = cmpxchg iN* %addr, iN %loaded, iN %new
1352 // %new_loaded = extractvalue { iN, i1 } %pair, 0
1353 // %success = extractvalue { iN, i1 } %pair, 1
1354 // br i1 %success, label %atomicrmw.end, label %loop
1355 // atomicrmw.end:
1356 // [...]
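  //
  // For a specific operation, the PerformOp callback fills in %new. E.g. for
  // `atomicrmw nand iN* %addr, iN %incr` it would materialize (a sketch of
  // what performAtomicOp builds):
  //     %tmp = and iN %loaded, %incr
  //     %new = xor iN %tmp, -1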
James Y Knight148a6462016-06-17 18:11:48 +00001357 BasicBlock *ExitBB =
1358 BB->splitBasicBlock(Builder.GetInsertPoint(), "atomicrmw.end");
JF Bastiene8aad292015-08-03 15:29:47 +00001359 BasicBlock *LoopBB = BasicBlock::Create(Ctx, "atomicrmw.start", F, ExitBB);
1360
JF Bastiene8aad292015-08-03 15:29:47 +00001361 // The split call above "helpfully" added a branch at the end of BB (to the
1362 // wrong place), but we want a load. It's easiest to just remove
1363 // the branch entirely.
1364 std::prev(BB->end())->eraseFromParent();
1365 Builder.SetInsertPoint(BB);
James Y Knight148a6462016-06-17 18:11:48 +00001366 LoadInst *InitLoaded = Builder.CreateLoad(ResultTy, Addr);
JF Bastiene8aad292015-08-03 15:29:47 +00001367 // Atomics require at least natural alignment.
James Y Knight148a6462016-06-17 18:11:48 +00001368 InitLoaded->setAlignment(ResultTy->getPrimitiveSizeInBits() / 8);
JF Bastiene8aad292015-08-03 15:29:47 +00001369 Builder.CreateBr(LoopBB);
1370
1371 // Start the main loop block now that we've taken care of the preliminaries.
1372 Builder.SetInsertPoint(LoopBB);
James Y Knight148a6462016-06-17 18:11:48 +00001373 PHINode *Loaded = Builder.CreatePHI(ResultTy, 2, "loaded");
JF Bastiene8aad292015-08-03 15:29:47 +00001374 Loaded->addIncoming(InitLoaded, BB);
1375
James Y Knight148a6462016-06-17 18:11:48 +00001376 Value *NewVal = PerformOp(Builder, Loaded);
JF Bastiene8aad292015-08-03 15:29:47 +00001377
1378 Value *NewLoaded = nullptr;
1379 Value *Success = nullptr;
1380
James Y Knight148a6462016-06-17 18:11:48 +00001381 CreateCmpXchg(Builder, Addr, Loaded, NewVal,
1382 MemOpOrder == AtomicOrdering::Unordered
1383 ? AtomicOrdering::Monotonic
1384 : MemOpOrder,
JF Bastiene8aad292015-08-03 15:29:47 +00001385 Success, NewLoaded);
1386 assert(Success && NewLoaded);
1387
1388 Loaded->addIncoming(NewLoaded, LoopBB);
1389
1390 Builder.CreateCondBr(Success, ExitBB, LoopBB);
1391
1392 Builder.SetInsertPoint(ExitBB, ExitBB->begin());
James Y Knight148a6462016-06-17 18:11:48 +00001393 return NewLoaded;
1394}
JF Bastiene8aad292015-08-03 15:29:47 +00001395
Alex Bradbury79518b02018-09-19 14:51:42 +00001396bool AtomicExpand::tryExpandAtomicCmpXchg(AtomicCmpXchgInst *CI) {
1397 unsigned MinCASSize = TLI->getMinCmpXchgSizeInBits() / 8;
1398 unsigned ValueSize = getAtomicOpSize(CI);
1399
1400 switch (TLI->shouldExpandAtomicCmpXchgInIR(CI)) {
1401 default:
1402 llvm_unreachable("Unhandled case in tryExpandAtomicCmpXchg");
1403 case TargetLoweringBase::AtomicExpansionKind::None:
1404 if (ValueSize < MinCASSize)
1405 expandPartwordCmpXchg(CI);
1406 return false;
1407 case TargetLoweringBase::AtomicExpansionKind::LLSC: {
1408 assert(ValueSize >= MinCASSize &&
1409 "MinCmpXchgSizeInBits not yet supported for LL/SC expansions.");
1410 return expandAtomicCmpXchg(CI);
1411 }
1412 case TargetLoweringBase::AtomicExpansionKind::MaskedIntrinsic:
Alex Bradbury66d9a752018-11-29 20:43:42 +00001413 expandAtomicCmpXchgToMaskedIntrinsic(CI);
1414 return true;
Alex Bradbury79518b02018-09-19 14:51:42 +00001415 }
1416}
1417
James Y Knight148a6462016-06-17 18:11:48 +00001418// Note: This function is exposed externally by AtomicExpandUtils.h
1419bool llvm::expandAtomicRMWToCmpXchg(AtomicRMWInst *AI,
1420 CreateCmpXchgInstFun CreateCmpXchg) {
1421 IRBuilder<> Builder(AI);
1422 Value *Loaded = AtomicExpand::insertRMWCmpXchgLoop(
1423 Builder, AI->getType(), AI->getPointerOperand(), AI->getOrdering(),
1424 [&](IRBuilder<> &Builder, Value *Loaded) {
1425 return performAtomicOp(AI->getOperation(), Builder, Loaded,
1426 AI->getValOperand());
1427 },
1428 CreateCmpXchg);
1429
1430 AI->replaceAllUsesWith(Loaded);
JF Bastiene8aad292015-08-03 15:29:47 +00001431 AI->eraseFromParent();
JF Bastiene8aad292015-08-03 15:29:47 +00001432 return true;
1433}
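
// A minimal (illustrative) CreateCmpXchgInstFun lowers straight to a native
// cmpxchg instruction; it mirrors the lambda used in expandAtomicRMWToLibcall
// below, minus the libcall expansion step:
//   auto CreateCmpXchg = [](IRBuilder<> &Builder, Value *Addr, Value *Loaded,
//                           Value *NewVal, AtomicOrdering MemOpOrder,
//                           Value *&Success, Value *&NewLoaded) {
//     AtomicCmpXchgInst *Pair = Builder.CreateAtomicCmpXchg(
//         Addr, Loaded, NewVal, MemOpOrder,
//         AtomicCmpXchgInst::getStrongestFailureOrdering(MemOpOrder));
//     Success = Builder.CreateExtractValue(Pair, 1, "success");
//     NewLoaded = Builder.CreateExtractValue(Pair, 0, "newloaded");
//   };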
James Y Knight19f6cce2016-04-12 20:18:48 +00001434
James Y Knight19f6cce2016-04-12 20:18:48 +00001435// In order to use one of the sized library calls such as
1436// __atomic_fetch_add_4, the alignment must be sufficient, the size
1437// must be one of the potentially-specialized sizes, and the value
1438// type must actually exist in C on the target (otherwise, the
 1439 // function wouldn't actually be defined).
1440static bool canUseSizedAtomicCall(unsigned Size, unsigned Align,
1441 const DataLayout &DL) {
1442 // TODO: "LargestSize" is an approximation for "largest type that
1443 // you can express in C". It seems to be the case that int128 is
1444 // supported on all 64-bit platforms, otherwise only up to 64-bit
1445 // integers are supported. If we get this wrong, then we'll try to
1446 // call a sized libcall that doesn't actually exist. There should
1447 // really be some more reliable way in LLVM of determining integer
1448 // sizes which are valid in the target's C ABI...
Jun Bum Limbe11bdc2016-05-13 18:38:35 +00001449 unsigned LargestSize = DL.getLargestLegalIntTypeSizeInBits() >= 64 ? 16 : 8;
James Y Knight19f6cce2016-04-12 20:18:48 +00001450 return Align >= Size &&
1451 (Size == 1 || Size == 2 || Size == 4 || Size == 8 || Size == 16) &&
1452 Size <= LargestSize;
1453}
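
// For illustration, on a typical 64-bit target (largest legal integer type of
// at least 64 bits, so LargestSize is 16): Size=4/Align=4 and Size=16/Align=16
// qualify for the sized calls, Size=4/Align=2 fails the alignment check, and
// Size=3 is never specialized; a 32-bit target instead caps out at Size=8.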
1454
1455void AtomicExpand::expandAtomicLoadToLibcall(LoadInst *I) {
1456 static const RTLIB::Libcall Libcalls[6] = {
1457 RTLIB::ATOMIC_LOAD, RTLIB::ATOMIC_LOAD_1, RTLIB::ATOMIC_LOAD_2,
1458 RTLIB::ATOMIC_LOAD_4, RTLIB::ATOMIC_LOAD_8, RTLIB::ATOMIC_LOAD_16};
1459 unsigned Size = getAtomicOpSize(I);
1460 unsigned Align = getAtomicOpAlign(I);
1461
1462 bool expanded = expandAtomicOpToLibcall(
1463 I, Size, Align, I->getPointerOperand(), nullptr, nullptr,
1464 I->getOrdering(), AtomicOrdering::NotAtomic, Libcalls);
1465 (void)expanded;
 1466 assert(expanded && "expandAtomicOpToLibcall shouldn't fail for Load");
1467}
1468
1469void AtomicExpand::expandAtomicStoreToLibcall(StoreInst *I) {
1470 static const RTLIB::Libcall Libcalls[6] = {
1471 RTLIB::ATOMIC_STORE, RTLIB::ATOMIC_STORE_1, RTLIB::ATOMIC_STORE_2,
1472 RTLIB::ATOMIC_STORE_4, RTLIB::ATOMIC_STORE_8, RTLIB::ATOMIC_STORE_16};
1473 unsigned Size = getAtomicOpSize(I);
1474 unsigned Align = getAtomicOpAlign(I);
1475
1476 bool expanded = expandAtomicOpToLibcall(
1477 I, Size, Align, I->getPointerOperand(), I->getValueOperand(), nullptr,
1478 I->getOrdering(), AtomicOrdering::NotAtomic, Libcalls);
1479 (void)expanded;
 1480 assert(expanded && "expandAtomicOpToLibcall shouldn't fail for Store");
1481}
1482
1483void AtomicExpand::expandAtomicCASToLibcall(AtomicCmpXchgInst *I) {
1484 static const RTLIB::Libcall Libcalls[6] = {
1485 RTLIB::ATOMIC_COMPARE_EXCHANGE, RTLIB::ATOMIC_COMPARE_EXCHANGE_1,
1486 RTLIB::ATOMIC_COMPARE_EXCHANGE_2, RTLIB::ATOMIC_COMPARE_EXCHANGE_4,
1487 RTLIB::ATOMIC_COMPARE_EXCHANGE_8, RTLIB::ATOMIC_COMPARE_EXCHANGE_16};
1488 unsigned Size = getAtomicOpSize(I);
1489 unsigned Align = getAtomicOpAlign(I);
1490
1491 bool expanded = expandAtomicOpToLibcall(
1492 I, Size, Align, I->getPointerOperand(), I->getNewValOperand(),
1493 I->getCompareOperand(), I->getSuccessOrdering(), I->getFailureOrdering(),
1494 Libcalls);
1495 (void)expanded;
 1496 assert(expanded && "expandAtomicOpToLibcall shouldn't fail for CAS");
1497}
1498
1499static ArrayRef<RTLIB::Libcall> GetRMWLibcall(AtomicRMWInst::BinOp Op) {
1500 static const RTLIB::Libcall LibcallsXchg[6] = {
1501 RTLIB::ATOMIC_EXCHANGE, RTLIB::ATOMIC_EXCHANGE_1,
1502 RTLIB::ATOMIC_EXCHANGE_2, RTLIB::ATOMIC_EXCHANGE_4,
1503 RTLIB::ATOMIC_EXCHANGE_8, RTLIB::ATOMIC_EXCHANGE_16};
1504 static const RTLIB::Libcall LibcallsAdd[6] = {
1505 RTLIB::UNKNOWN_LIBCALL, RTLIB::ATOMIC_FETCH_ADD_1,
1506 RTLIB::ATOMIC_FETCH_ADD_2, RTLIB::ATOMIC_FETCH_ADD_4,
1507 RTLIB::ATOMIC_FETCH_ADD_8, RTLIB::ATOMIC_FETCH_ADD_16};
1508 static const RTLIB::Libcall LibcallsSub[6] = {
1509 RTLIB::UNKNOWN_LIBCALL, RTLIB::ATOMIC_FETCH_SUB_1,
1510 RTLIB::ATOMIC_FETCH_SUB_2, RTLIB::ATOMIC_FETCH_SUB_4,
1511 RTLIB::ATOMIC_FETCH_SUB_8, RTLIB::ATOMIC_FETCH_SUB_16};
1512 static const RTLIB::Libcall LibcallsAnd[6] = {
1513 RTLIB::UNKNOWN_LIBCALL, RTLIB::ATOMIC_FETCH_AND_1,
1514 RTLIB::ATOMIC_FETCH_AND_2, RTLIB::ATOMIC_FETCH_AND_4,
1515 RTLIB::ATOMIC_FETCH_AND_8, RTLIB::ATOMIC_FETCH_AND_16};
1516 static const RTLIB::Libcall LibcallsOr[6] = {
1517 RTLIB::UNKNOWN_LIBCALL, RTLIB::ATOMIC_FETCH_OR_1,
1518 RTLIB::ATOMIC_FETCH_OR_2, RTLIB::ATOMIC_FETCH_OR_4,
1519 RTLIB::ATOMIC_FETCH_OR_8, RTLIB::ATOMIC_FETCH_OR_16};
1520 static const RTLIB::Libcall LibcallsXor[6] = {
1521 RTLIB::UNKNOWN_LIBCALL, RTLIB::ATOMIC_FETCH_XOR_1,
1522 RTLIB::ATOMIC_FETCH_XOR_2, RTLIB::ATOMIC_FETCH_XOR_4,
1523 RTLIB::ATOMIC_FETCH_XOR_8, RTLIB::ATOMIC_FETCH_XOR_16};
1524 static const RTLIB::Libcall LibcallsNand[6] = {
1525 RTLIB::UNKNOWN_LIBCALL, RTLIB::ATOMIC_FETCH_NAND_1,
1526 RTLIB::ATOMIC_FETCH_NAND_2, RTLIB::ATOMIC_FETCH_NAND_4,
1527 RTLIB::ATOMIC_FETCH_NAND_8, RTLIB::ATOMIC_FETCH_NAND_16};
1528
1529 switch (Op) {
1530 case AtomicRMWInst::BAD_BINOP:
1531 llvm_unreachable("Should not have BAD_BINOP.");
1532 case AtomicRMWInst::Xchg:
1533 return makeArrayRef(LibcallsXchg);
1534 case AtomicRMWInst::Add:
1535 return makeArrayRef(LibcallsAdd);
1536 case AtomicRMWInst::Sub:
1537 return makeArrayRef(LibcallsSub);
1538 case AtomicRMWInst::And:
1539 return makeArrayRef(LibcallsAnd);
1540 case AtomicRMWInst::Or:
1541 return makeArrayRef(LibcallsOr);
1542 case AtomicRMWInst::Xor:
1543 return makeArrayRef(LibcallsXor);
1544 case AtomicRMWInst::Nand:
1545 return makeArrayRef(LibcallsNand);
1546 case AtomicRMWInst::Max:
1547 case AtomicRMWInst::Min:
1548 case AtomicRMWInst::UMax:
1549 case AtomicRMWInst::UMin:
1550 // No atomic libcalls are available for max/min/umax/umin.
1551 return {};
1552 }
1553 llvm_unreachable("Unexpected AtomicRMW operation.");
1554}
1555
1556void AtomicExpand::expandAtomicRMWToLibcall(AtomicRMWInst *I) {
1557 ArrayRef<RTLIB::Libcall> Libcalls = GetRMWLibcall(I->getOperation());
1558
1559 unsigned Size = getAtomicOpSize(I);
1560 unsigned Align = getAtomicOpAlign(I);
1561
1562 bool Success = false;
1563 if (!Libcalls.empty())
1564 Success = expandAtomicOpToLibcall(
1565 I, Size, Align, I->getPointerOperand(), I->getValOperand(), nullptr,
1566 I->getOrdering(), AtomicOrdering::NotAtomic, Libcalls);
1567
1568 // The expansion failed: either there were no libcalls at all for
1569 // the operation (min/max), or there were only size-specialized
1570 // libcalls (add/sub/etc) and we needed a generic. So, expand to a
1571 // CAS libcall, via a CAS loop, instead.
1572 if (!Success) {
1573 expandAtomicRMWToCmpXchg(I, [this](IRBuilder<> &Builder, Value *Addr,
1574 Value *Loaded, Value *NewVal,
1575 AtomicOrdering MemOpOrder,
1576 Value *&Success, Value *&NewLoaded) {
1577 // Create the CAS instruction normally...
1578 AtomicCmpXchgInst *Pair = Builder.CreateAtomicCmpXchg(
1579 Addr, Loaded, NewVal, MemOpOrder,
1580 AtomicCmpXchgInst::getStrongestFailureOrdering(MemOpOrder));
1581 Success = Builder.CreateExtractValue(Pair, 1, "success");
1582 NewLoaded = Builder.CreateExtractValue(Pair, 0, "newloaded");
1583
1584 // ...and then expand the CAS into a libcall.
1585 expandAtomicCASToLibcall(Pair);
1586 });
1587 }
1588}
1589
1590// A helper routine for the above expandAtomic*ToLibcall functions.
1591//
1592// 'Libcalls' contains an array of enum values for the particular
1593// ATOMIC libcalls to be emitted. All of the other arguments besides
1594// 'I' are extracted from the Instruction subclass by the
1595// caller. Depending on the particular call, some will be null.
1596bool AtomicExpand::expandAtomicOpToLibcall(
1597 Instruction *I, unsigned Size, unsigned Align, Value *PointerOperand,
1598 Value *ValueOperand, Value *CASExpected, AtomicOrdering Ordering,
1599 AtomicOrdering Ordering2, ArrayRef<RTLIB::Libcall> Libcalls) {
1600 assert(Libcalls.size() == 6);
1601
1602 LLVMContext &Ctx = I->getContext();
1603 Module *M = I->getModule();
1604 const DataLayout &DL = M->getDataLayout();
1605 IRBuilder<> Builder(I);
1606 IRBuilder<> AllocaBuilder(&I->getFunction()->getEntryBlock().front());
1607
1608 bool UseSizedLibcall = canUseSizedAtomicCall(Size, Align, DL);
1609 Type *SizedIntTy = Type::getIntNTy(Ctx, Size * 8);
1610
1611 unsigned AllocaAlignment = DL.getPrefTypeAlignment(SizedIntTy);
1612
1613 // TODO: the "order" argument type is "int", not int32. So
1614 // getInt32Ty may be wrong if the arch uses e.g. 16-bit ints.
1615 ConstantInt *SizeVal64 = ConstantInt::get(Type::getInt64Ty(Ctx), Size);
JF Bastienbbb0aee62016-04-18 18:01:43 +00001616 assert(Ordering != AtomicOrdering::NotAtomic && "expect atomic MO");
James Y Knight19f6cce2016-04-12 20:18:48 +00001617 Constant *OrderingVal =
JF Bastienbbb0aee62016-04-18 18:01:43 +00001618 ConstantInt::get(Type::getInt32Ty(Ctx), (int)toCABI(Ordering));
1619 Constant *Ordering2Val = nullptr;
1620 if (CASExpected) {
1621 assert(Ordering2 != AtomicOrdering::NotAtomic && "expect atomic MO");
1622 Ordering2Val =
1623 ConstantInt::get(Type::getInt32Ty(Ctx), (int)toCABI(Ordering2));
1624 }
James Y Knight19f6cce2016-04-12 20:18:48 +00001625 bool HasResult = I->getType() != Type::getVoidTy(Ctx);
1626
1627 RTLIB::Libcall RTLibType;
1628 if (UseSizedLibcall) {
1629 switch (Size) {
1630 case 1: RTLibType = Libcalls[1]; break;
1631 case 2: RTLibType = Libcalls[2]; break;
1632 case 4: RTLibType = Libcalls[3]; break;
1633 case 8: RTLibType = Libcalls[4]; break;
1634 case 16: RTLibType = Libcalls[5]; break;
1635 }
1636 } else if (Libcalls[0] != RTLIB::UNKNOWN_LIBCALL) {
1637 RTLibType = Libcalls[0];
1638 } else {
1639 // Can't use sized function, and there's no generic for this
1640 // operation, so give up.
1641 return false;
1642 }
1643
 1644 // Build up the function call. There are two kinds. First, the sized
1645 // variants. These calls are going to be one of the following (with
1646 // N=1,2,4,8,16):
1647 // iN __atomic_load_N(iN *ptr, int ordering)
1648 // void __atomic_store_N(iN *ptr, iN val, int ordering)
1649 // iN __atomic_{exchange|fetch_*}_N(iN *ptr, iN val, int ordering)
1650 // bool __atomic_compare_exchange_N(iN *ptr, iN *expected, iN desired,
1651 // int success_order, int failure_order)
1652 //
1653 // Note that these functions can be used for non-integer atomic
1654 // operations, the values just need to be bitcast to integers on the
1655 // way in and out.
1656 //
1657 // And, then, the generic variants. They look like the following:
1658 // void __atomic_load(size_t size, void *ptr, void *ret, int ordering)
1659 // void __atomic_store(size_t size, void *ptr, void *val, int ordering)
1660 // void __atomic_exchange(size_t size, void *ptr, void *val, void *ret,
1661 // int ordering)
1662 // bool __atomic_compare_exchange(size_t size, void *ptr, void *expected,
1663 // void *desired, int success_order,
1664 // int failure_order)
1665 //
1666 // The different signatures are built up depending on the
1667 // 'UseSizedLibcall', 'CASExpected', 'ValueOperand', and 'HasResult'
1668 // variables.
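  //
  // For illustration (ordering values per toCABI, where seq_cst is 5 and
  // acquire is 2), a sized call might come out as
  //     %r = call i8 @__atomic_exchange_1(i8* %ptr, i8 %val, i32 5)
  // while an under-aligned 16-byte cmpxchg on a 64-bit target (size_t == i64)
  // takes the generic form
  //     %ok = call zeroext i1 @__atomic_compare_exchange(i64 16, i8* %ptr,
  //                i8* %expected, i8* %desired, i32 5, i32 2)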
1669
1670 AllocaInst *AllocaCASExpected = nullptr;
1671 Value *AllocaCASExpected_i8 = nullptr;
1672 AllocaInst *AllocaValue = nullptr;
1673 Value *AllocaValue_i8 = nullptr;
1674 AllocaInst *AllocaResult = nullptr;
1675 Value *AllocaResult_i8 = nullptr;
1676
1677 Type *ResultTy;
1678 SmallVector<Value *, 6> Args;
Reid Klecknerb5180542017-03-21 16:57:19 +00001679 AttributeList Attr;
James Y Knight19f6cce2016-04-12 20:18:48 +00001680
1681 // 'size' argument.
1682 if (!UseSizedLibcall) {
1683 // Note, getIntPtrType is assumed equivalent to size_t.
1684 Args.push_back(ConstantInt::get(DL.getIntPtrType(Ctx), Size));
1685 }
1686
1687 // 'ptr' argument.
1688 Value *PtrVal =
1689 Builder.CreateBitCast(PointerOperand, Type::getInt8PtrTy(Ctx));
1690 Args.push_back(PtrVal);
1691
1692 // 'expected' argument, if present.
1693 if (CASExpected) {
1694 AllocaCASExpected = AllocaBuilder.CreateAlloca(CASExpected->getType());
1695 AllocaCASExpected->setAlignment(AllocaAlignment);
1696 AllocaCASExpected_i8 =
1697 Builder.CreateBitCast(AllocaCASExpected, Type::getInt8PtrTy(Ctx));
1698 Builder.CreateLifetimeStart(AllocaCASExpected_i8, SizeVal64);
1699 Builder.CreateAlignedStore(CASExpected, AllocaCASExpected, AllocaAlignment);
1700 Args.push_back(AllocaCASExpected_i8);
1701 }
1702
1703 // 'val' argument ('desired' for cas), if present.
1704 if (ValueOperand) {
1705 if (UseSizedLibcall) {
1706 Value *IntValue =
1707 Builder.CreateBitOrPointerCast(ValueOperand, SizedIntTy);
1708 Args.push_back(IntValue);
1709 } else {
1710 AllocaValue = AllocaBuilder.CreateAlloca(ValueOperand->getType());
1711 AllocaValue->setAlignment(AllocaAlignment);
1712 AllocaValue_i8 =
1713 Builder.CreateBitCast(AllocaValue, Type::getInt8PtrTy(Ctx));
1714 Builder.CreateLifetimeStart(AllocaValue_i8, SizeVal64);
1715 Builder.CreateAlignedStore(ValueOperand, AllocaValue, AllocaAlignment);
1716 Args.push_back(AllocaValue_i8);
1717 }
1718 }
1719
1720 // 'ret' argument.
1721 if (!CASExpected && HasResult && !UseSizedLibcall) {
1722 AllocaResult = AllocaBuilder.CreateAlloca(I->getType());
1723 AllocaResult->setAlignment(AllocaAlignment);
1724 AllocaResult_i8 =
1725 Builder.CreateBitCast(AllocaResult, Type::getInt8PtrTy(Ctx));
1726 Builder.CreateLifetimeStart(AllocaResult_i8, SizeVal64);
1727 Args.push_back(AllocaResult_i8);
1728 }
1729
1730 // 'ordering' ('success_order' for cas) argument.
1731 Args.push_back(OrderingVal);
1732
1733 // 'failure_order' argument, if present.
1734 if (Ordering2Val)
1735 Args.push_back(Ordering2Val);
1736
1737 // Now, the return type.
1738 if (CASExpected) {
1739 ResultTy = Type::getInt1Ty(Ctx);
Reid Klecknerb5180542017-03-21 16:57:19 +00001740 Attr = Attr.addAttribute(Ctx, AttributeList::ReturnIndex, Attribute::ZExt);
James Y Knight19f6cce2016-04-12 20:18:48 +00001741 } else if (HasResult && UseSizedLibcall)
1742 ResultTy = SizedIntTy;
1743 else
1744 ResultTy = Type::getVoidTy(Ctx);
1745
1746 // Done with setting up arguments and return types, create the call:
1747 SmallVector<Type *, 6> ArgTys;
1748 for (Value *Arg : Args)
1749 ArgTys.push_back(Arg->getType());
1750 FunctionType *FnType = FunctionType::get(ResultTy, ArgTys, false);
1751 Constant *LibcallFn =
1752 M->getOrInsertFunction(TLI->getLibcallName(RTLibType), FnType, Attr);
1753 CallInst *Call = Builder.CreateCall(LibcallFn, Args);
1754 Call->setAttributes(Attr);
1755 Value *Result = Call;
1756
1757 // And then, extract the results...
1758 if (ValueOperand && !UseSizedLibcall)
1759 Builder.CreateLifetimeEnd(AllocaValue_i8, SizeVal64);
1760
1761 if (CASExpected) {
1762 // The final result from the CAS is {load of 'expected' alloca, bool result
1763 // from call}
1764 Type *FinalResultTy = I->getType();
1765 Value *V = UndefValue::get(FinalResultTy);
1766 Value *ExpectedOut =
1767 Builder.CreateAlignedLoad(AllocaCASExpected, AllocaAlignment);
1768 Builder.CreateLifetimeEnd(AllocaCASExpected_i8, SizeVal64);
1769 V = Builder.CreateInsertValue(V, ExpectedOut, 0);
1770 V = Builder.CreateInsertValue(V, Result, 1);
1771 I->replaceAllUsesWith(V);
1772 } else if (HasResult) {
1773 Value *V;
1774 if (UseSizedLibcall)
1775 V = Builder.CreateBitOrPointerCast(Result, I->getType());
1776 else {
1777 V = Builder.CreateAlignedLoad(AllocaResult, AllocaAlignment);
1778 Builder.CreateLifetimeEnd(AllocaResult_i8, SizeVal64);
1779 }
1780 I->replaceAllUsesWith(V);
1781 }
1782 I->eraseFromParent();
1783 return true;
1784}