//===-- ARMAtomicExpandPass.cpp - Expand atomic instructions --------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains a pass (at IR level) to replace atomic instructions with
// appropriate (intrinsic-based) ldrex/strex loops.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "arm-atomic-expand"
#include "ARM.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/Debug.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
using namespace llvm;

namespace {
  class ARMAtomicExpandPass : public FunctionPass {
    const TargetLowering *TLI;
  public:
    static char ID; // Pass identification, replacement for typeid
    explicit ARMAtomicExpandPass(const TargetMachine *TM = nullptr)
      : FunctionPass(ID), TLI(TM ? TM->getTargetLowering() : nullptr) {}

    bool runOnFunction(Function &F) override;
    bool expandAtomicInsts(Function &F);

    bool expandAtomicLoad(LoadInst *LI);
    bool expandAtomicStore(StoreInst *SI);
    bool expandAtomicRMW(AtomicRMWInst *AI);
    bool expandAtomicCmpXchg(AtomicCmpXchgInst *CI);

    AtomicOrdering insertLeadingFence(IRBuilder<> &Builder, AtomicOrdering Ord);
    void insertTrailingFence(IRBuilder<> &Builder, AtomicOrdering Ord);

    /// Perform a load-linked operation on Addr, returning a "Value *" with the
    /// corresponding pointee type. This may entail some non-trivial operations
    /// to truncate or reconstruct illegal types since intrinsics must be legal.
    Value *loadLinked(IRBuilder<> &Builder, Value *Addr, AtomicOrdering Ord);

    /// Perform a store-conditional operation to Addr. Return the status of the
    /// store: 0 if it succeeded, non-zero otherwise.
    Value *storeConditional(IRBuilder<> &Builder, Value *Val, Value *Addr,
                            AtomicOrdering Ord);

    /// Return true if the given (atomic) instruction should be expanded by
    /// this pass.
    bool shouldExpandAtomic(Instruction *Inst);
  };
}

char ARMAtomicExpandPass::ID = 0;

FunctionPass *llvm::createARMAtomicExpandPass(const TargetMachine *TM) {
  return new ARMAtomicExpandPass(TM);
}

bool ARMAtomicExpandPass::runOnFunction(Function &F) {
  SmallVector<Instruction *, 1> AtomicInsts;

  // Changing control-flow while iterating through it is a bad idea, so gather a
  // list of all atomic instructions before we start.
  for (BasicBlock &BB : F)
    for (Instruction &Inst : BB) {
      if (isa<AtomicRMWInst>(&Inst) || isa<AtomicCmpXchgInst>(&Inst) ||
          (isa<LoadInst>(&Inst) && cast<LoadInst>(&Inst)->isAtomic()) ||
          (isa<StoreInst>(&Inst) && cast<StoreInst>(&Inst)->isAtomic()))
        AtomicInsts.push_back(&Inst);
    }

  bool MadeChange = false;
  for (Instruction *Inst : AtomicInsts) {
    if (!shouldExpandAtomic(Inst))
      continue;

    if (AtomicRMWInst *AI = dyn_cast<AtomicRMWInst>(Inst))
      MadeChange |= expandAtomicRMW(AI);
    else if (AtomicCmpXchgInst *CI = dyn_cast<AtomicCmpXchgInst>(Inst))
      MadeChange |= expandAtomicCmpXchg(CI);
    else if (LoadInst *LI = dyn_cast<LoadInst>(Inst))
      MadeChange |= expandAtomicLoad(LI);
    else if (StoreInst *SI = dyn_cast<StoreInst>(Inst))
      MadeChange |= expandAtomicStore(SI);
    else
      llvm_unreachable("Unknown atomic instruction");
  }

  return MadeChange;
}

bool ARMAtomicExpandPass::expandAtomicLoad(LoadInst *LI) {
  // Load instructions don't actually need a leading fence, even in the
  // SequentiallyConsistent case.
  AtomicOrdering MemOpOrder =
      TLI->getInsertFencesForAtomic() ? Monotonic : LI->getOrdering();

  // The only 64-bit load guaranteed to be single-copy atomic by the ARM ARM is
  // an ldrexd (A3.5.3).
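  // Illustrative sketch only: a "load atomic i64* %p seq_cst" becomes either a
  // call to @llvm.arm.ldaexd with no extra fence, or (on subtargets that insert
  // separate fences) a call to @llvm.arm.ldrexd followed by "fence seq_cst",
  // with the {i32, i32} result recombined into an i64 by loadLinked() below.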
  IRBuilder<> Builder(LI);
  Value *Val = loadLinked(Builder, LI->getPointerOperand(), MemOpOrder);

  insertTrailingFence(Builder, LI->getOrdering());

  LI->replaceAllUsesWith(Val);
  LI->eraseFromParent();

  return true;
}

bool ARMAtomicExpandPass::expandAtomicStore(StoreInst *SI) {
  // The only atomic 64-bit store on ARM is an strexd that succeeds, which means
  // we need a loop and the entire instruction is essentially an "atomicrmw
  // xchg" that ignores the value loaded.
  IRBuilder<> Builder(SI);
  AtomicRMWInst *AI =
      Builder.CreateAtomicRMW(AtomicRMWInst::Xchg, SI->getPointerOperand(),
                              SI->getValueOperand(), SI->getOrdering());
  SI->eraseFromParent();

  // Now we have an appropriate swap instruction, lower it as usual.
  return expandAtomicRMW(AI);
}

bool ARMAtomicExpandPass::expandAtomicRMW(AtomicRMWInst *AI) {
  AtomicOrdering Order = AI->getOrdering();
  Value *Addr = AI->getPointerOperand();
  BasicBlock *BB = AI->getParent();
  Function *F = BB->getParent();
  LLVMContext &Ctx = F->getContext();

  // Given: atomicrmw some_op iN* %addr, iN %incr ordering
  //
  // The standard expansion we produce is:
  //     [...]
  //     fence?
  // atomicrmw.start:
  //     %loaded = @load.linked(%addr)
  //     %new = some_op iN %loaded, %incr
  //     %stored = @store_conditional(%new, %addr)
  //     %try_again = icmp ne i32 %stored, 0
  //     br i1 %try_again, label %atomicrmw.start, label %atomicrmw.end
  // atomicrmw.end:
  //     fence?
  //     [...]
  BasicBlock *ExitBB = BB->splitBasicBlock(AI, "atomicrmw.end");
  BasicBlock *LoopBB = BasicBlock::Create(Ctx, "atomicrmw.start", F, ExitBB);

  // This grabs the DebugLoc from AI.
  IRBuilder<> Builder(AI);

  // The split call above "helpfully" added a branch at the end of BB (to the
  // wrong place), but we might want a fence too. It's easiest to just remove
  // the branch entirely.
  std::prev(BB->end())->eraseFromParent();
  Builder.SetInsertPoint(BB);
  AtomicOrdering MemOpOrder = insertLeadingFence(Builder, Order);
  Builder.CreateBr(LoopBB);

  // Start the main loop block now that we've taken care of the preliminaries.
  Builder.SetInsertPoint(LoopBB);
  Value *Loaded = loadLinked(Builder, Addr, MemOpOrder);

  Value *NewVal;
  switch (AI->getOperation()) {
  case AtomicRMWInst::Xchg:
    NewVal = AI->getValOperand();
    break;
  case AtomicRMWInst::Add:
    NewVal = Builder.CreateAdd(Loaded, AI->getValOperand(), "new");
    break;
  case AtomicRMWInst::Sub:
    NewVal = Builder.CreateSub(Loaded, AI->getValOperand(), "new");
    break;
  case AtomicRMWInst::And:
    NewVal = Builder.CreateAnd(Loaded, AI->getValOperand(), "new");
    break;
  case AtomicRMWInst::Nand:
    NewVal = Builder.CreateAnd(Loaded, Builder.CreateNot(AI->getValOperand()),
                               "new");
    break;
  case AtomicRMWInst::Or:
    NewVal = Builder.CreateOr(Loaded, AI->getValOperand(), "new");
    break;
  case AtomicRMWInst::Xor:
    NewVal = Builder.CreateXor(Loaded, AI->getValOperand(), "new");
    break;
  case AtomicRMWInst::Max:
    NewVal = Builder.CreateICmpSGT(Loaded, AI->getValOperand());
    NewVal = Builder.CreateSelect(NewVal, Loaded, AI->getValOperand(), "new");
    break;
  case AtomicRMWInst::Min:
    NewVal = Builder.CreateICmpSLE(Loaded, AI->getValOperand());
    NewVal = Builder.CreateSelect(NewVal, Loaded, AI->getValOperand(), "new");
    break;
  case AtomicRMWInst::UMax:
    NewVal = Builder.CreateICmpUGT(Loaded, AI->getValOperand());
    NewVal = Builder.CreateSelect(NewVal, Loaded, AI->getValOperand(), "new");
    break;
  case AtomicRMWInst::UMin:
    NewVal = Builder.CreateICmpULE(Loaded, AI->getValOperand());
    NewVal = Builder.CreateSelect(NewVal, Loaded, AI->getValOperand(), "new");
    break;
  default:
    llvm_unreachable("Unknown atomic op");
  }

  Value *StoreSuccess = storeConditional(Builder, NewVal, Addr, MemOpOrder);
  Value *TryAgain = Builder.CreateICmpNE(
      StoreSuccess, ConstantInt::get(IntegerType::get(Ctx, 32), 0), "tryagain");
  Builder.CreateCondBr(TryAgain, LoopBB, ExitBB);

  Builder.SetInsertPoint(ExitBB, ExitBB->begin());
  insertTrailingFence(Builder, Order);

  AI->replaceAllUsesWith(Loaded);
  AI->eraseFromParent();

  return true;
}

bool ARMAtomicExpandPass::expandAtomicCmpXchg(AtomicCmpXchgInst *CI) {
  AtomicOrdering SuccessOrder = CI->getSuccessOrdering();
  AtomicOrdering FailureOrder = CI->getFailureOrdering();
  Value *Addr = CI->getPointerOperand();
  BasicBlock *BB = CI->getParent();
  Function *F = BB->getParent();
  LLVMContext &Ctx = F->getContext();

  // Given: cmpxchg some_op iN* %addr, iN %desired, iN %new success_ord fail_ord
  //
  // The full expansion we produce is:
  //     [...]
  //     fence?
  // cmpxchg.start:
  //     %loaded = @load.linked(%addr)
  //     %should_store = icmp eq %loaded, %desired
  //     br i1 %should_store, label %cmpxchg.trystore,
  //                          label %cmpxchg.end/%cmpxchg.barrier
  // cmpxchg.trystore:
  //     %stored = @store_conditional(%new, %addr)
  //     %try_again = icmp ne i32 %stored, 0
  //     br i1 %try_again, label %cmpxchg.start, label %cmpxchg.barrier
  // cmpxchg.barrier:
  //     fence?
  //     br label %cmpxchg.end
  // cmpxchg.end:
  //     [...]
  BasicBlock *ExitBB = BB->splitBasicBlock(CI, "cmpxchg.end");
  auto BarrierBB = BasicBlock::Create(Ctx, "cmpxchg.barrier", F, ExitBB);
  auto TryStoreBB = BasicBlock::Create(Ctx, "cmpxchg.trystore", F, BarrierBB);
  auto LoopBB = BasicBlock::Create(Ctx, "cmpxchg.start", F, TryStoreBB);

  // This grabs the DebugLoc from CI.
  IRBuilder<> Builder(CI);

  // The split call above "helpfully" added a branch at the end of BB (to the
  // wrong place), but we might want a fence too. It's easiest to just remove
  // the branch entirely.
  std::prev(BB->end())->eraseFromParent();
  Builder.SetInsertPoint(BB);
  AtomicOrdering MemOpOrder = insertLeadingFence(Builder, SuccessOrder);
  Builder.CreateBr(LoopBB);

  // Start the main loop block now that we've taken care of the preliminaries.
  Builder.SetInsertPoint(LoopBB);
  Value *Loaded = loadLinked(Builder, Addr, MemOpOrder);
  Value *ShouldStore =
      Builder.CreateICmpEQ(Loaded, CI->getCompareOperand(), "should_store");

  // If the cmpxchg doesn't actually need any ordering when it fails, we can
  // jump straight past that fence instruction (if it exists).
  BasicBlock *FailureBB = FailureOrder == Monotonic ? ExitBB : BarrierBB;
  Builder.CreateCondBr(ShouldStore, TryStoreBB, FailureBB);

  Builder.SetInsertPoint(TryStoreBB);
  Value *StoreSuccess =
      storeConditional(Builder, CI->getNewValOperand(), Addr, MemOpOrder);
  Value *TryAgain = Builder.CreateICmpNE(
      StoreSuccess, ConstantInt::get(Type::getInt32Ty(Ctx), 0), "tryagain");
  Builder.CreateCondBr(TryAgain, LoopBB, BarrierBB);

  // Finally, make sure later instructions don't get reordered with a fence if
  // necessary.
  Builder.SetInsertPoint(BarrierBB);
  insertTrailingFence(Builder, SuccessOrder);
  Builder.CreateBr(ExitBB);

  CI->replaceAllUsesWith(Loaded);
  CI->eraseFromParent();

  return true;
}

Value *ARMAtomicExpandPass::loadLinked(IRBuilder<> &Builder, Value *Addr,
                                       AtomicOrdering Ord) {
  Module *M = Builder.GetInsertBlock()->getParent()->getParent();
  Type *ValTy = cast<PointerType>(Addr->getType())->getElementType();
  bool IsAcquire =
      Ord == Acquire || Ord == AcquireRelease || Ord == SequentiallyConsistent;

  // Since i64 isn't legal and intrinsics don't get type-lowered, the ldrexd
  // intrinsic must return {i32, i32} and we have to recombine them into a
  // single i64 here.
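  // Rough sketch of the i64 path (illustrative):
  //   %lohi = call { i32, i32 } @llvm.arm.ldrexd(i8* %addr)
  //   %lo   = extractvalue { i32, i32 } %lohi, 0
  //   %hi   = extractvalue { i32, i32 } %lohi, 1
  // (@llvm.arm.ldaexd for acquire orderings), followed by zext/shl/or to
  // rebuild the i64 value.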
  if (ValTy->getPrimitiveSizeInBits() == 64) {
    Intrinsic::ID Int =
        IsAcquire ? Intrinsic::arm_ldaexd : Intrinsic::arm_ldrexd;
    Function *Ldrex = llvm::Intrinsic::getDeclaration(M, Int);

    Addr = Builder.CreateBitCast(Addr, Type::getInt8PtrTy(M->getContext()));
    Value *LoHi = Builder.CreateCall(Ldrex, Addr, "lohi");

    Value *Lo = Builder.CreateExtractValue(LoHi, 0, "lo");
    Value *Hi = Builder.CreateExtractValue(LoHi, 1, "hi");
    Lo = Builder.CreateZExt(Lo, ValTy, "lo64");
    Hi = Builder.CreateZExt(Hi, ValTy, "hi64");
    return Builder.CreateOr(
        Lo, Builder.CreateShl(Hi, ConstantInt::get(ValTy, 32)), "val64");
  }

  Type *Tys[] = { Addr->getType() };
  Intrinsic::ID Int = IsAcquire ? Intrinsic::arm_ldaex : Intrinsic::arm_ldrex;
  Function *Ldrex = llvm::Intrinsic::getDeclaration(M, Int, Tys);

  return Builder.CreateTruncOrBitCast(Builder.CreateCall(Ldrex, Addr), ValTy);
}

Value *ARMAtomicExpandPass::storeConditional(IRBuilder<> &Builder, Value *Val,
                                             Value *Addr, AtomicOrdering Ord) {
  Module *M = Builder.GetInsertBlock()->getParent()->getParent();
  bool IsRelease =
      Ord == Release || Ord == AcquireRelease || Ord == SequentiallyConsistent;

  // Since the intrinsics must have legal type, the i64 intrinsics take two
  // parameters: "i32, i32". We must marshal Val into the appropriate form
  // before the call.
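  // Rough sketch of the i64 path (illustrative):
  //   %lo   = trunc i64 %val to i32
  //   %shr  = lshr i64 %val, 32
  //   %hi   = trunc i64 %shr to i32
  //   %fail = call i32 @llvm.arm.strexd(i32 %lo, i32 %hi, i8* %addr)
  // (@llvm.arm.stlexd for release orderings); %fail is 0 on success.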
  if (Val->getType()->getPrimitiveSizeInBits() == 64) {
    Intrinsic::ID Int =
        IsRelease ? Intrinsic::arm_stlexd : Intrinsic::arm_strexd;
    Function *Strex = Intrinsic::getDeclaration(M, Int);
    Type *Int32Ty = Type::getInt32Ty(M->getContext());

    Value *Lo = Builder.CreateTrunc(Val, Int32Ty, "lo");
    Value *Hi = Builder.CreateTrunc(Builder.CreateLShr(Val, 32), Int32Ty, "hi");
    Addr = Builder.CreateBitCast(Addr, Type::getInt8PtrTy(M->getContext()));
    return Builder.CreateCall3(Strex, Lo, Hi, Addr);
  }

  Intrinsic::ID Int = IsRelease ? Intrinsic::arm_stlex : Intrinsic::arm_strex;
  Type *Tys[] = { Addr->getType() };
  Function *Strex = Intrinsic::getDeclaration(M, Int, Tys);

  return Builder.CreateCall2(
      Strex, Builder.CreateZExtOrBitCast(
                 Val, Strex->getFunctionType()->getParamType(0)),
      Addr);
}

AtomicOrdering ARMAtomicExpandPass::insertLeadingFence(IRBuilder<> &Builder,
                                                       AtomicOrdering Ord) {
  if (!TLI->getInsertFencesForAtomic())
    return Ord;

  if (Ord == Release || Ord == AcquireRelease || Ord == SequentiallyConsistent)
    Builder.CreateFence(Release);

  // The exclusive operations don't need any barrier if we're adding separate
  // fences.
  return Monotonic;
}

void ARMAtomicExpandPass::insertTrailingFence(IRBuilder<> &Builder,
                                              AtomicOrdering Ord) {
  if (!TLI->getInsertFencesForAtomic())
    return;

  if (Ord == Acquire || Ord == AcquireRelease)
    Builder.CreateFence(Acquire);
  else if (Ord == SequentiallyConsistent)
    Builder.CreateFence(SequentiallyConsistent);
}
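
// Rough summary of the fence placement implemented above when
// getInsertFencesForAtomic() is set (the exclusive operations themselves are
// then emitted with monotonic ordering):
//   monotonic: no fences
//   acquire:   trailing "fence acquire"
//   release:   leading "fence release"
//   acq_rel:   leading "fence release", trailing "fence acquire"
//   seq_cst:   leading "fence release", trailing "fence seq_cst"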

bool ARMAtomicExpandPass::shouldExpandAtomic(Instruction *Inst) {
  // Loads and stores narrower than 64 bits are already atomic; ones wider than
  // that are doomed anyway, so defer to the default libcall and blame the OS
  // when things go wrong:
  if (StoreInst *SI = dyn_cast<StoreInst>(Inst))
    return SI->getValueOperand()->getType()->getPrimitiveSizeInBits() == 64;
  else if (LoadInst *LI = dyn_cast<LoadInst>(Inst))
    return LI->getType()->getPrimitiveSizeInBits() == 64;

  // For the real atomic operations, we have ldrex/strex up to 64 bits.
  return Inst->getType()->getPrimitiveSizeInBits() <= 64;
}