//===-- X86AtomicExpandPass.cpp - Expand illegal atomic instructions ------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains a pass (at IR level) to replace atomic instructions which
// cannot be implemented as a single instruction with cmpxchg-based loops.
//
//===----------------------------------------------------------------------===//

15#include "X86.h"
16#include "X86TargetMachine.h"
17#include "llvm/CodeGen/Passes.h"
18#include "llvm/IR/Function.h"
19#include "llvm/IR/IRBuilder.h"
20#include "llvm/IR/Instructions.h"
21#include "llvm/IR/Intrinsics.h"
22#include "llvm/IR/Module.h"
23#include "llvm/Support/Debug.h"
24#include "llvm/Target/TargetLowering.h"
25#include "llvm/Target/TargetMachine.h"
26using namespace llvm;
27
28#define DEBUG_TYPE "x86-atomic-expand"
29
namespace {
  class X86AtomicExpandPass : public FunctionPass {
    const X86TargetMachine *TM;
  public:
    static char ID; // Pass identification, replacement for typeid
    explicit X86AtomicExpandPass(const X86TargetMachine *TM)
      : FunctionPass(ID), TM(TM) {}

    bool runOnFunction(Function &F) override;
    bool expandAtomicInsts(Function &F);

    bool needsCmpXchgNb(Type *MemType);

    /// There are four kinds of atomic operations. Two never need expanding:
    /// cmpxchg is what we expand the others *to*, and loads are easily handled
    /// by ISelLowering. Atomicrmw and store can need expanding in some
    /// circumstances.
    bool shouldExpand(Instruction *Inst);

    /// 128-bit atomic stores (64-bit on i686) need to be implemented in terms
    /// of trivial cmpxchg16b (cmpxchg8b on i686) loops. A simple store isn't
    /// necessarily atomic.
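    /// (For instance, an i64 store on a 32-bit target would otherwise be split
    /// into two 32-bit moves, which other threads could observe half-written.)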
    bool shouldExpandStore(StoreInst *SI);

    /// Only some atomicrmw instructions need expanding -- some operations
    /// (e.g. max) have absolutely no architectural support; some (e.g. or) have
    /// limited support but can't return the previous value; some (e.g. add)
    /// have complete support in the instruction set.
    ///
    /// Also, naturally, 128-bit operations always need to be expanded.
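    ///
    /// For example, "atomicrmw add" maps onto "lock xadd" even when the old
    /// value is needed, whereas "atomicrmw max" has no single-instruction
    /// equivalent and must become a compare-and-exchange loop.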
    bool shouldExpandAtomicRMW(AtomicRMWInst *AI);

    bool expandAtomicRMW(AtomicRMWInst *AI);
    bool expandAtomicStore(StoreInst *SI);
  };
}

char X86AtomicExpandPass::ID = 0;

FunctionPass *llvm::createX86AtomicExpandPass(const X86TargetMachine *TM) {
  return new X86AtomicExpandPass(TM);
}

bool X86AtomicExpandPass::runOnFunction(Function &F) {
  SmallVector<Instruction *, 1> AtomicInsts;

  // Changing control-flow while iterating through it is a bad idea, so gather a
  // list of all atomic instructions before we start.
  for (BasicBlock &BB : F)
    for (Instruction &Inst : BB) {
      if (isa<AtomicRMWInst>(&Inst) ||
          (isa<StoreInst>(&Inst) && cast<StoreInst>(&Inst)->isAtomic()))
        AtomicInsts.push_back(&Inst);
    }

  bool MadeChange = false;
  for (Instruction *Inst : AtomicInsts) {
    if (!shouldExpand(Inst))
      continue;

    if (AtomicRMWInst *AI = dyn_cast<AtomicRMWInst>(Inst))
      MadeChange |= expandAtomicRMW(AI);
    if (StoreInst *SI = dyn_cast<StoreInst>(Inst))
      MadeChange |= expandAtomicStore(SI);
  }

  return MadeChange;
}

/// Returns true if operations on the given type will need to use either
/// cmpxchg8b or cmpxchg16b. This occurs if the type is 1 step up from the
/// native width, and the instructions are available (otherwise we leave them
/// alone to become __sync_fetch_and_... calls).
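/// In practice this means i128 on 64-bit targets (cmpxchg16b) and i64 on
/// 32-bit targets (cmpxchg8b).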
bool X86AtomicExpandPass::needsCmpXchgNb(llvm::Type *MemType) {
  const X86Subtarget &Subtarget = TM->getSubtarget<X86Subtarget>();
  if (!Subtarget.hasCmpxchg16b())
    return false;

  unsigned CmpXchgNbWidth = Subtarget.is64Bit() ? 128 : 64;

  unsigned OpWidth = MemType->getPrimitiveSizeInBits();
  if (OpWidth == CmpXchgNbWidth)
    return true;

  return false;
}

bool X86AtomicExpandPass::shouldExpandAtomicRMW(AtomicRMWInst *AI) {
  const X86Subtarget &Subtarget = TM->getSubtarget<X86Subtarget>();
  unsigned NativeWidth = Subtarget.is64Bit() ? 64 : 32;

  if (needsCmpXchgNb(AI->getType()))
    return true;

  if (AI->getType()->getPrimitiveSizeInBits() > NativeWidth)
    return false;

  AtomicRMWInst::BinOp Op = AI->getOperation();
  switch (Op) {
  default:
    llvm_unreachable("Unknown atomic operation");
  case AtomicRMWInst::Xchg:
  case AtomicRMWInst::Add:
  case AtomicRMWInst::Sub:
    // It's better to use xadd or xchg for these in all cases; sub can be
    // handled as an xadd of the negated operand.
    return false;
  case AtomicRMWInst::Or:
  case AtomicRMWInst::And:
  case AtomicRMWInst::Xor:
    // If the atomicrmw's result isn't actually used, we can just add a "lock"
    // prefix to a normal instruction for these operations.
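    // For example, an "atomicrmw or" whose result is unused can be selected as
    // a single lock-prefixed "or" with a memory operand.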
141 return !AI->use_empty();
142 case AtomicRMWInst::Nand:
143 case AtomicRMWInst::Max:
144 case AtomicRMWInst::Min:
145 case AtomicRMWInst::UMax:
146 case AtomicRMWInst::UMin:
147 // These always require a non-trivial set of data operations on x86. We must
148 // use a cmpxchg loop.
149 return true;
150 }
151}

bool X86AtomicExpandPass::shouldExpandStore(StoreInst *SI) {
  if (needsCmpXchgNb(SI->getValueOperand()->getType()))
    return true;

  return false;
}

bool X86AtomicExpandPass::shouldExpand(Instruction *Inst) {
  if (AtomicRMWInst *AI = dyn_cast<AtomicRMWInst>(Inst))
    return shouldExpandAtomicRMW(AI);
  if (StoreInst *SI = dyn_cast<StoreInst>(Inst))
    return shouldExpandStore(SI);
  return false;
}

/// Emit IR to implement the given atomicrmw operation on values in registers,
/// returning the new value.
static Value *performAtomicOp(AtomicRMWInst::BinOp Op, IRBuilder<> &Builder,
                              Value *Loaded, Value *Inc) {
  Value *NewVal;
  switch (Op) {
  case AtomicRMWInst::Xchg:
    return Inc;
  case AtomicRMWInst::Add:
    return Builder.CreateAdd(Loaded, Inc, "new");
  case AtomicRMWInst::Sub:
    return Builder.CreateSub(Loaded, Inc, "new");
  case AtomicRMWInst::And:
    return Builder.CreateAnd(Loaded, Inc, "new");
  case AtomicRMWInst::Nand:
    return Builder.CreateNot(Builder.CreateAnd(Loaded, Inc), "new");
  case AtomicRMWInst::Or:
    return Builder.CreateOr(Loaded, Inc, "new");
  case AtomicRMWInst::Xor:
    return Builder.CreateXor(Loaded, Inc, "new");
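  // For the min/max flavours, the comparison checks whether the currently
  // loaded value already satisfies the predicate; if so it is kept, otherwise
  // the incoming operand becomes the new value.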
  case AtomicRMWInst::Max:
    NewVal = Builder.CreateICmpSGT(Loaded, Inc);
    return Builder.CreateSelect(NewVal, Loaded, Inc, "new");
  case AtomicRMWInst::Min:
    NewVal = Builder.CreateICmpSLE(Loaded, Inc);
    return Builder.CreateSelect(NewVal, Loaded, Inc, "new");
  case AtomicRMWInst::UMax:
    NewVal = Builder.CreateICmpUGT(Loaded, Inc);
    return Builder.CreateSelect(NewVal, Loaded, Inc, "new");
  case AtomicRMWInst::UMin:
    NewVal = Builder.CreateICmpULE(Loaded, Inc);
    return Builder.CreateSelect(NewVal, Loaded, Inc, "new");
  default:
    break;
  }
  llvm_unreachable("Unknown atomic op");
}

bool X86AtomicExpandPass::expandAtomicRMW(AtomicRMWInst *AI) {
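  // cmpxchg requires at least monotonic ordering, so an unordered atomicrmw
  // (should one ever reach here) is promoted to monotonic.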
  AtomicOrdering Order =
      AI->getOrdering() == Unordered ? Monotonic : AI->getOrdering();
  Value *Addr = AI->getPointerOperand();
  BasicBlock *BB = AI->getParent();
  Function *F = BB->getParent();
  LLVMContext &Ctx = F->getContext();

  // Given: atomicrmw some_op iN* %addr, iN %incr ordering
  //
  // The standard expansion we produce is:
  //     [...]
  //     %init_loaded = load atomic iN* %addr
  //     br label %loop
  // loop:
  //     %loaded = phi iN [ %init_loaded, %entry ], [ %new_loaded, %loop ]
  //     %new = some_op iN %loaded, %incr
  //     %pair = cmpxchg iN* %addr, iN %loaded, iN %new
  //     %new_loaded = extractvalue { iN, i1 } %pair, 0
  //     %success = extractvalue { iN, i1 } %pair, 1
  //     br i1 %success, label %atomicrmw.end, label %loop
  // atomicrmw.end:
  //     [...]
  BasicBlock *ExitBB = BB->splitBasicBlock(AI, "atomicrmw.end");
  BasicBlock *LoopBB = BasicBlock::Create(Ctx, "atomicrmw.start", F, ExitBB);

  // This grabs the DebugLoc from AI.
  IRBuilder<> Builder(AI);

  // The split call above "helpfully" added a branch at the end of BB (to the
  // wrong place), but we want a load. It's easiest to just remove the branch
  // entirely.
  std::prev(BB->end())->eraseFromParent();
  Builder.SetInsertPoint(BB);
  LoadInst *InitLoaded = Builder.CreateLoad(Addr);
  // Atomics require at least natural alignment; setAlignment expects bytes,
  // not bits.
  InitLoaded->setAlignment(AI->getType()->getPrimitiveSizeInBits() / 8);
  Builder.CreateBr(LoopBB);

  // Start the main loop block now that we've taken care of the preliminaries.
  Builder.SetInsertPoint(LoopBB);
  PHINode *Loaded = Builder.CreatePHI(AI->getType(), 2, "loaded");
  Loaded->addIncoming(InitLoaded, BB);

  Value *NewVal =
      performAtomicOp(AI->getOperation(), Builder, Loaded, AI->getValOperand());

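  // The failure ordering only needs to be as strong as the success ordering
  // permits: getStrongestFailureOrdering drops any release component (e.g.
  // acq_rel fails as acquire), since a failed cmpxchg performs no store.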
  Value *Pair = Builder.CreateAtomicCmpXchg(
      Addr, Loaded, NewVal, Order,
      AtomicCmpXchgInst::getStrongestFailureOrdering(Order));
  Value *NewLoaded = Builder.CreateExtractValue(Pair, 0, "newloaded");
  Loaded->addIncoming(NewLoaded, LoopBB);

  Value *Success = Builder.CreateExtractValue(Pair, 1, "success");
  Builder.CreateCondBr(Success, ExitBB, LoopBB);

  AI->replaceAllUsesWith(NewLoaded);
  AI->eraseFromParent();

  return true;
}

bool X86AtomicExpandPass::expandAtomicStore(StoreInst *SI) {
  // An atomic store might need cmpxchg16b (cmpxchg8b on 32-bit x86) to
  // execute. Express this in terms of the usual expansion to "atomicrmw xchg".
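  // For instance, "store atomic i128 %v, i128* %p seq_cst" becomes
  // "atomicrmw xchg i128* %p, i128 %v seq_cst", which is then lowered to a
  // cmpxchg16b loop below.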
270 IRBuilder<> Builder(SI);
271 AtomicRMWInst *AI =
272 Builder.CreateAtomicRMW(AtomicRMWInst::Xchg, SI->getPointerOperand(),
273 SI->getValueOperand(), SI->getOrdering());
274 SI->eraseFromParent();
275
276 // Now we have an appropriate swap instruction, lower it as usual.
277 if (shouldExpandAtomicRMW(AI))
278 return expandAtomicRMW(AI);
279
280 return AI;
281}