//===-- ARMAtomicExpandPass.cpp - Expand atomic instructions --------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains a pass (at IR level) to replace atomic instructions with
// appropriate (intrinsic-based) ldrex/strex loops.
//
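// For example, an "atomicrmw add i32" is rewritten into a loop built around
// the @llvm.arm.ldrex / @llvm.arm.strex intrinsics, roughly as follows
// (illustrative sketch only; the exact IR depends on the ordering and on
// whether separate fences are inserted):
//
//   atomicrmw.start:
//     %loaded = call i32 @llvm.arm.ldrex.p0i32(i32* %addr)
//     %new = add i32 %loaded, %incr
//     %stored = call i32 @llvm.arm.strex.p0i32(i32 %new, i32* %addr)
//     %try_again = icmp ne i32 %stored, 0
//     br i1 %try_again, label %atomicrmw.start, label %atomicrmw.end
//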
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "arm-atomic-expand"
#include "ARM.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/Debug.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
using namespace llvm;

namespace {
  class ARMAtomicExpandPass : public FunctionPass {
    const TargetLowering *TLI;
  public:
    static char ID; // Pass identification, replacement for typeid
    explicit ARMAtomicExpandPass(const TargetMachine *TM = 0)
      : FunctionPass(ID), TLI(TM->getTargetLowering()) {}

    bool runOnFunction(Function &F) override;
    bool expandAtomicInsts(Function &F);

    bool expandAtomicLoad(LoadInst *LI);
    bool expandAtomicStore(StoreInst *SI);
    bool expandAtomicRMW(AtomicRMWInst *AI);
    bool expandAtomicCmpXchg(AtomicCmpXchgInst *CI);

    AtomicOrdering insertLeadingFence(IRBuilder<> &Builder, AtomicOrdering Ord);
    void insertTrailingFence(IRBuilder<> &Builder, AtomicOrdering Ord);

    /// Perform a load-linked operation on Addr, returning a "Value *" with the
    /// corresponding pointee type. This may entail some non-trivial operations
    /// to truncate or reconstruct illegal types since intrinsics must be legal.
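    /// For an illegal type such as i8, for instance, this produces roughly
    /// (sketch, assuming the non-acquire case):
    ///   %raw = call i32 @llvm.arm.ldrex.p0i8(i8* %addr)
    ///   %val = trunc i32 %raw to i8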
    Value *loadLinked(IRBuilder<> &Builder, Value *Addr, AtomicOrdering Ord);

    /// Perform a store-conditional operation to Addr. Return the status of the
    /// store: 0 if it succeeded, non-zero otherwise.
    Value *storeConditional(IRBuilder<> &Builder, Value *Val, Value *Addr,
                            AtomicOrdering Ord);

    /// Return true if the given (atomic) instruction should be expanded by this
    /// pass.
    bool shouldExpandAtomic(Instruction *Inst);
  };
}

char ARMAtomicExpandPass::ID = 0;

FunctionPass *llvm::createARMAtomicExpandPass(const TargetMachine *TM) {
  return new ARMAtomicExpandPass(TM);
}

bool ARMAtomicExpandPass::runOnFunction(Function &F) {
  SmallVector<Instruction *, 1> AtomicInsts;

  // Changing control-flow while iterating through it is a bad idea, so gather a
  // list of all atomic instructions before we start.
  for (BasicBlock &BB : F)
    for (Instruction &Inst : BB) {
      if (isa<AtomicRMWInst>(&Inst) || isa<AtomicCmpXchgInst>(&Inst) ||
          (isa<LoadInst>(&Inst) && cast<LoadInst>(&Inst)->isAtomic()) ||
          (isa<StoreInst>(&Inst) && cast<StoreInst>(&Inst)->isAtomic()))
        AtomicInsts.push_back(&Inst);
    }

  bool MadeChange = false;
  for (Instruction *Inst : AtomicInsts) {
    if (!shouldExpandAtomic(Inst))
      continue;

    if (AtomicRMWInst *AI = dyn_cast<AtomicRMWInst>(Inst))
      MadeChange |= expandAtomicRMW(AI);
    else if (AtomicCmpXchgInst *CI = dyn_cast<AtomicCmpXchgInst>(Inst))
      MadeChange |= expandAtomicCmpXchg(CI);
    else if (LoadInst *LI = dyn_cast<LoadInst>(Inst))
      MadeChange |= expandAtomicLoad(LI);
    else if (StoreInst *SI = dyn_cast<StoreInst>(Inst))
      MadeChange |= expandAtomicStore(SI);
    else
      llvm_unreachable("Unknown atomic instruction");
  }

  return MadeChange;
}

bool ARMAtomicExpandPass::expandAtomicLoad(LoadInst *LI) {
  // Load instructions don't actually need a leading fence, even in the
  // SequentiallyConsistent case.
  AtomicOrdering MemOpOrder =
      TLI->getInsertFencesForAtomic() ? Monotonic : LI->getOrdering();

  // The only 64-bit load guaranteed to be single-copy atomic by the ARM ARM is
  // an ldrexd (A3.5.3).
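  // For a seq_cst "load atomic i64", for example, this produces roughly
  // (illustrative sketch, assuming separate fences are being inserted; with
  // acquire/release instructions an ldaexd is used and no fence is needed):
  //   %lohi = call { i32, i32 } @llvm.arm.ldrexd(i8* %addr)
  //   ...extractvalue/zext/shl/or to rebuild the i64...
  //   fence seq_cst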
  IRBuilder<> Builder(LI);
  Value *Val = loadLinked(Builder, LI->getPointerOperand(), MemOpOrder);

  insertTrailingFence(Builder, LI->getOrdering());

  LI->replaceAllUsesWith(Val);
  LI->eraseFromParent();

  return true;
}

bool ARMAtomicExpandPass::expandAtomicStore(StoreInst *SI) {
  // The only atomic 64-bit store on ARM is an strexd that succeeds, which means
  // we need a loop and the entire instruction is essentially an "atomicrmw
  // xchg" that ignores the value loaded.
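  // For instance (sketch only), "store atomic i64 %v, i64* %p seq_cst" becomes
  //   atomicrmw xchg i64* %p, i64 %v seq_cst
  // which expandAtomicRMW then turns into an ldrexd/strexd loop.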
  IRBuilder<> Builder(SI);
  AtomicRMWInst *AI =
      Builder.CreateAtomicRMW(AtomicRMWInst::Xchg, SI->getPointerOperand(),
                              SI->getValueOperand(), SI->getOrdering());
  SI->eraseFromParent();

  // Now we have an appropriate swap instruction, lower it as usual.
  return expandAtomicRMW(AI);
}

bool ARMAtomicExpandPass::expandAtomicRMW(AtomicRMWInst *AI) {
  AtomicOrdering Order = AI->getOrdering();
  Value *Addr = AI->getPointerOperand();
  BasicBlock *BB = AI->getParent();
  Function *F = BB->getParent();
  LLVMContext &Ctx = F->getContext();

  // Given: atomicrmw some_op iN* %addr, iN %incr ordering
  //
  // The standard expansion we produce is:
  //     [...]
  //     fence?
  // atomicrmw.start:
  //     %loaded = @load.linked(%addr)
  //     %new = some_op iN %loaded, %incr
  //     %stored = @store_conditional(%new, %addr)
  //     %try_again = icmp ne i32 %stored, 0
  //     br i1 %try_again, label %atomicrmw.start, label %atomicrmw.end
153 // atomicrmw.end:
154 // fence?
155 // [...]
  BasicBlock *ExitBB = BB->splitBasicBlock(AI, "atomicrmw.end");
  BasicBlock *LoopBB = BasicBlock::Create(Ctx, "atomicrmw.start", F, ExitBB);

  // This grabs the DebugLoc from AI.
  IRBuilder<> Builder(AI);

  // The split call above "helpfully" added a branch at the end of BB (to the
  // wrong place), but we might want a fence too. It's easiest to just remove
  // the branch entirely.
  std::prev(BB->end())->eraseFromParent();
  Builder.SetInsertPoint(BB);
  AtomicOrdering MemOpOrder = insertLeadingFence(Builder, Order);
  Builder.CreateBr(LoopBB);

  // Start the main loop block now that we've taken care of the preliminaries.
  Builder.SetInsertPoint(LoopBB);
  Value *Loaded = loadLinked(Builder, Addr, MemOpOrder);

  Value *NewVal;
  switch (AI->getOperation()) {
  case AtomicRMWInst::Xchg:
    NewVal = AI->getValOperand();
    break;
  case AtomicRMWInst::Add:
    NewVal = Builder.CreateAdd(Loaded, AI->getValOperand(), "new");
    break;
  case AtomicRMWInst::Sub:
    NewVal = Builder.CreateSub(Loaded, AI->getValOperand(), "new");
    break;
  case AtomicRMWInst::And:
    NewVal = Builder.CreateAnd(Loaded, AI->getValOperand(), "new");
    break;
  case AtomicRMWInst::Nand:
    NewVal = Builder.CreateAnd(Loaded, Builder.CreateNot(AI->getValOperand()),
                               "new");
    break;
  case AtomicRMWInst::Or:
    NewVal = Builder.CreateOr(Loaded, AI->getValOperand(), "new");
    break;
  case AtomicRMWInst::Xor:
    NewVal = Builder.CreateXor(Loaded, AI->getValOperand(), "new");
    break;
  case AtomicRMWInst::Max:
    NewVal = Builder.CreateICmpSGT(Loaded, AI->getValOperand());
    NewVal = Builder.CreateSelect(NewVal, Loaded, AI->getValOperand(), "new");
    break;
  case AtomicRMWInst::Min:
    NewVal = Builder.CreateICmpSLE(Loaded, AI->getValOperand());
    NewVal = Builder.CreateSelect(NewVal, Loaded, AI->getValOperand(), "new");
    break;
  case AtomicRMWInst::UMax:
    NewVal = Builder.CreateICmpUGT(Loaded, AI->getValOperand());
    NewVal = Builder.CreateSelect(NewVal, Loaded, AI->getValOperand(), "new");
    break;
  case AtomicRMWInst::UMin:
    NewVal = Builder.CreateICmpULE(Loaded, AI->getValOperand());
    NewVal = Builder.CreateSelect(NewVal, Loaded, AI->getValOperand(), "new");
    break;
  default:
    llvm_unreachable("Unknown atomic op");
  }

  Value *StoreSuccess = storeConditional(Builder, NewVal, Addr, MemOpOrder);
  Value *TryAgain = Builder.CreateICmpNE(
      StoreSuccess, ConstantInt::get(IntegerType::get(Ctx, 32), 0), "tryagain");
  Builder.CreateCondBr(TryAgain, LoopBB, ExitBB);

  Builder.SetInsertPoint(ExitBB, ExitBB->begin());
  insertTrailingFence(Builder, Order);

  AI->replaceAllUsesWith(Loaded);
  AI->eraseFromParent();

  return true;
}

bool ARMAtomicExpandPass::expandAtomicCmpXchg(AtomicCmpXchgInst *CI) {
  AtomicOrdering Order = CI->getSuccessOrdering();
  Value *Addr = CI->getPointerOperand();
  BasicBlock *BB = CI->getParent();
  Function *F = BB->getParent();
  LLVMContext &Ctx = F->getContext();

  // Given: cmpxchg some_op iN* %addr, iN %desired, iN %new success_ord fail_ord
  //
  // The standard expansion we produce is:
  //     [...]
  //     fence?
  // cmpxchg.start:
  //     %loaded = @load.linked(%addr)
  //     %should_store = icmp eq %loaded, %desired
  //     br i1 %should_store, label %cmpxchg.trystore, label %cmpxchg.end
  // cmpxchg.trystore:
  //     %stored = @store_conditional(%new, %addr)
  //     %try_again = icmp ne i32 %stored, 0
  //     br i1 %try_again, label %cmpxchg.start, label %cmpxchg.end
  // cmpxchg.end:
  //     fence?
  //     [...]
  BasicBlock *ExitBB = BB->splitBasicBlock(CI, "cmpxchg.end");
  BasicBlock *TryStoreBB =
      BasicBlock::Create(Ctx, "cmpxchg.trystore", F, ExitBB);
  BasicBlock *LoopBB = BasicBlock::Create(Ctx, "cmpxchg.start", F, TryStoreBB);

  // This grabs the DebugLoc from CI
  IRBuilder<> Builder(CI);

  // The split call above "helpfully" added a branch at the end of BB (to the
  // wrong place), but we might want a fence too. It's easiest to just remove
  // the branch entirely.
  std::prev(BB->end())->eraseFromParent();
  Builder.SetInsertPoint(BB);
  AtomicOrdering MemOpOrder = insertLeadingFence(Builder, Order);
  Builder.CreateBr(LoopBB);

  // Start the main loop block now that we've taken care of the preliminaries.
  Builder.SetInsertPoint(LoopBB);
  Value *Loaded = loadLinked(Builder, Addr, MemOpOrder);
  Value *ShouldStore =
      Builder.CreateICmpEQ(Loaded, CI->getCompareOperand(), "should_store");
  Builder.CreateCondBr(ShouldStore, TryStoreBB, ExitBB);

  Builder.SetInsertPoint(TryStoreBB);
  Value *StoreSuccess =
      storeConditional(Builder, CI->getNewValOperand(), Addr, MemOpOrder);
  Value *TryAgain = Builder.CreateICmpNE(
      StoreSuccess, ConstantInt::get(Type::getInt32Ty(Ctx), 0), "success");
  Builder.CreateCondBr(TryAgain, LoopBB, ExitBB);

  // Finally, make sure later instructions don't get reordered with a fence if
  // necessary.
  Builder.SetInsertPoint(ExitBB, ExitBB->begin());
  insertTrailingFence(Builder, Order);

  CI->replaceAllUsesWith(Loaded);
  CI->eraseFromParent();

  return true;
}

Value *ARMAtomicExpandPass::loadLinked(IRBuilder<> &Builder, Value *Addr,
                                       AtomicOrdering Ord) {
  Module *M = Builder.GetInsertBlock()->getParent()->getParent();
  Type *ValTy = cast<PointerType>(Addr->getType())->getElementType();
  bool IsAcquire =
      Ord == Acquire || Ord == AcquireRelease || Ord == SequentiallyConsistent;

  // Since i64 isn't legal and intrinsics don't get type-lowered, the ldrexd
  // intrinsic must return {i32, i32} and we have to recombine them into a
  // single i64 here.
  if (ValTy->getPrimitiveSizeInBits() == 64) {
    Intrinsic::ID Int =
        IsAcquire ? Intrinsic::arm_ldaexd : Intrinsic::arm_ldrexd;
    Function *Ldrex = llvm::Intrinsic::getDeclaration(M, Int);

    Addr = Builder.CreateBitCast(Addr, Type::getInt8PtrTy(M->getContext()));
    Value *LoHi = Builder.CreateCall(Ldrex, Addr, "lohi");

    Value *Lo = Builder.CreateExtractValue(LoHi, 0, "lo");
    Value *Hi = Builder.CreateExtractValue(LoHi, 1, "hi");
    Lo = Builder.CreateZExt(Lo, ValTy, "lo64");
    Hi = Builder.CreateZExt(Hi, ValTy, "hi64");
    return Builder.CreateOr(
        Lo, Builder.CreateShl(Hi, ConstantInt::get(ValTy, 32)), "val64");
  }

  Type *Tys[] = { Addr->getType() };
  Intrinsic::ID Int = IsAcquire ? Intrinsic::arm_ldaex : Intrinsic::arm_ldrex;
  Function *Ldrex = llvm::Intrinsic::getDeclaration(M, Int, Tys);

  return Builder.CreateTruncOrBitCast(
      Builder.CreateCall(Ldrex, Addr),
      cast<PointerType>(Addr->getType())->getElementType());
}

Value *ARMAtomicExpandPass::storeConditional(IRBuilder<> &Builder, Value *Val,
                                             Value *Addr, AtomicOrdering Ord) {
  Module *M = Builder.GetInsertBlock()->getParent()->getParent();
  bool IsRelease =
      Ord == Release || Ord == AcquireRelease || Ord == SequentiallyConsistent;

  // Since the intrinsics must have legal type, the i64 intrinsics take two
  // parameters: "i32, i32". We must marshal Val into the appropriate form
  // before the call.
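  // e.g. (sketch only, non-release case) an i64 store-conditional ends up as:
  //   %lo = trunc i64 %val to i32
  //   %shifted = lshr i64 %val, 32
  //   %hi = trunc i64 %shifted to i32
  //   %status = call i32 @llvm.arm.strexd(i32 %lo, i32 %hi, i8* %addr)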
  if (Val->getType()->getPrimitiveSizeInBits() == 64) {
    Intrinsic::ID Int =
        IsRelease ? Intrinsic::arm_stlexd : Intrinsic::arm_strexd;
    Function *Strex = Intrinsic::getDeclaration(M, Int);
    Type *Int32Ty = Type::getInt32Ty(M->getContext());

    Value *Lo = Builder.CreateTrunc(Val, Int32Ty, "lo");
    Value *Hi = Builder.CreateTrunc(Builder.CreateLShr(Val, 32), Int32Ty, "hi");
    Addr = Builder.CreateBitCast(Addr, Type::getInt8PtrTy(M->getContext()));
    return Builder.CreateCall3(Strex, Lo, Hi, Addr);
  }

  Intrinsic::ID Int = IsRelease ? Intrinsic::arm_stlex : Intrinsic::arm_strex;
  Type *Tys[] = { Addr->getType() };
  Function *Strex = Intrinsic::getDeclaration(M, Int, Tys);

  return Builder.CreateCall2(
      Strex, Builder.CreateZExtOrBitCast(
                 Val, Strex->getFunctionType()->getParamType(0)),
      Addr);
}

AtomicOrdering ARMAtomicExpandPass::insertLeadingFence(IRBuilder<> &Builder,
                                                       AtomicOrdering Ord) {
  if (!TLI->getInsertFencesForAtomic())
    return Ord;

  if (Ord == Release || Ord == AcquireRelease || Ord == SequentiallyConsistent)
    Builder.CreateFence(Release);

  // The exclusive operations don't need any barrier if we're adding separate
  // fences.
  return Monotonic;
}

void ARMAtomicExpandPass::insertTrailingFence(IRBuilder<> &Builder,
                                              AtomicOrdering Ord) {
  if (!TLI->getInsertFencesForAtomic())
    return;

  if (Ord == Acquire || Ord == AcquireRelease)
    Builder.CreateFence(Acquire);
  else if (Ord == SequentiallyConsistent)
    Builder.CreateFence(SequentiallyConsistent);
}

bool ARMAtomicExpandPass::shouldExpandAtomic(Instruction *Inst) {
  // Loads and stores less than 64 bits are already atomic; ones above that
  // are doomed anyway, so defer to the default libcall and blame the OS when
  // things go wrong:
  if (StoreInst *SI = dyn_cast<StoreInst>(Inst))
    return SI->getValueOperand()->getType()->getPrimitiveSizeInBits() == 64;
  else if (LoadInst *LI = dyn_cast<LoadInst>(Inst))
    return LI->getType()->getPrimitiveSizeInBits() == 64;

  // For the real atomic operations, we have ldrex/strex up to 64 bits.
  return Inst->getType()->getPrimitiveSizeInBits() <= 64;
}