//===--- CGAtomic.cpp - Emit LLVM IR for atomic operations ----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the code for emitting atomic operations.
//
//===----------------------------------------------------------------------===//

#include "CodeGenFunction.h"
#include "CGCall.h"
#include "CodeGenModule.h"
#include "clang/AST/ASTContext.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Intrinsics.h"

using namespace clang;
using namespace CodeGen;

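/// Emit the LLVM instructions for a single atomic operation whose memory
/// ordering has already been resolved to a constant.  Ptr is the address of
/// the atomic object; Val1 and Val2 (when used) are addresses of temporaries
/// holding the operands; Dest, if non-null, is where the result is stored.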
static void
EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, llvm::Value *Dest,
             llvm::Value *Ptr, llvm::Value *Val1, llvm::Value *Val2,
             uint64_t Size, unsigned Align, llvm::AtomicOrdering Order) {
  llvm::AtomicRMWInst::BinOp Op = llvm::AtomicRMWInst::Add;
  llvm::Instruction::BinaryOps PostOp = (llvm::Instruction::BinaryOps)0;

  switch (E->getOp()) {
  case AtomicExpr::AO__c11_atomic_init:
    llvm_unreachable("Already handled!");

  case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
  case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
  case AtomicExpr::AO__atomic_compare_exchange:
  case AtomicExpr::AO__atomic_compare_exchange_n: {
    // Note that cmpxchg only supports specifying one ordering and
    // doesn't support weak cmpxchg, at least at the moment.
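    // cmpxchg returns the value that was in memory: writing it back to Val1
    // updates the caller's 'expected' object, and comparing it against the
    // expected value produces the boolean result of the builtin.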
    llvm::LoadInst *LoadVal1 = CGF.Builder.CreateLoad(Val1);
    LoadVal1->setAlignment(Align);
    llvm::LoadInst *LoadVal2 = CGF.Builder.CreateLoad(Val2);
    LoadVal2->setAlignment(Align);
    llvm::AtomicCmpXchgInst *CXI =
        CGF.Builder.CreateAtomicCmpXchg(Ptr, LoadVal1, LoadVal2, Order);
    CXI->setVolatile(E->isVolatile());
    llvm::StoreInst *StoreVal1 = CGF.Builder.CreateStore(CXI, Val1);
    StoreVal1->setAlignment(Align);
    llvm::Value *Cmp = CGF.Builder.CreateICmpEQ(CXI, LoadVal1);
    CGF.EmitStoreOfScalar(Cmp, CGF.MakeAddrLValue(Dest, E->getType()));
    return;
  }

  case AtomicExpr::AO__c11_atomic_load:
  case AtomicExpr::AO__atomic_load_n:
  case AtomicExpr::AO__atomic_load: {
    llvm::LoadInst *Load = CGF.Builder.CreateLoad(Ptr);
    Load->setAtomic(Order);
    Load->setAlignment(Size);
    Load->setVolatile(E->isVolatile());
    llvm::StoreInst *StoreDest = CGF.Builder.CreateStore(Load, Dest);
    StoreDest->setAlignment(Align);
    return;
  }

  case AtomicExpr::AO__c11_atomic_store:
  case AtomicExpr::AO__atomic_store:
  case AtomicExpr::AO__atomic_store_n: {
    assert(!Dest && "Store does not return a value");
    llvm::LoadInst *LoadVal1 = CGF.Builder.CreateLoad(Val1);
    LoadVal1->setAlignment(Align);
    llvm::StoreInst *Store = CGF.Builder.CreateStore(LoadVal1, Ptr);
    Store->setAtomic(Order);
    Store->setAlignment(Size);
    Store->setVolatile(E->isVolatile());
    return;
  }

  case AtomicExpr::AO__c11_atomic_exchange:
  case AtomicExpr::AO__atomic_exchange_n:
  case AtomicExpr::AO__atomic_exchange:
    Op = llvm::AtomicRMWInst::Xchg;
    break;

  case AtomicExpr::AO__atomic_add_fetch:
    PostOp = llvm::Instruction::Add;
    // Fall through.
  case AtomicExpr::AO__c11_atomic_fetch_add:
  case AtomicExpr::AO__atomic_fetch_add:
    Op = llvm::AtomicRMWInst::Add;
    break;

  case AtomicExpr::AO__atomic_sub_fetch:
    PostOp = llvm::Instruction::Sub;
    // Fall through.
  case AtomicExpr::AO__c11_atomic_fetch_sub:
  case AtomicExpr::AO__atomic_fetch_sub:
    Op = llvm::AtomicRMWInst::Sub;
    break;

  case AtomicExpr::AO__atomic_and_fetch:
    PostOp = llvm::Instruction::And;
    // Fall through.
  case AtomicExpr::AO__c11_atomic_fetch_and:
  case AtomicExpr::AO__atomic_fetch_and:
    Op = llvm::AtomicRMWInst::And;
    break;

  case AtomicExpr::AO__atomic_or_fetch:
    PostOp = llvm::Instruction::Or;
    // Fall through.
  case AtomicExpr::AO__c11_atomic_fetch_or:
  case AtomicExpr::AO__atomic_fetch_or:
    Op = llvm::AtomicRMWInst::Or;
    break;

  case AtomicExpr::AO__atomic_xor_fetch:
    PostOp = llvm::Instruction::Xor;
    // Fall through.
  case AtomicExpr::AO__c11_atomic_fetch_xor:
  case AtomicExpr::AO__atomic_fetch_xor:
    Op = llvm::AtomicRMWInst::Xor;
    break;

  case AtomicExpr::AO__atomic_nand_fetch:
    PostOp = llvm::Instruction::And;
    // Fall through.
  case AtomicExpr::AO__atomic_fetch_nand:
    Op = llvm::AtomicRMWInst::Nand;
    break;
  }

  llvm::LoadInst *LoadVal1 = CGF.Builder.CreateLoad(Val1);
  LoadVal1->setAlignment(Align);
  llvm::AtomicRMWInst *RMWI =
      CGF.Builder.CreateAtomicRMW(Op, Ptr, LoadVal1, Order);
  RMWI->setVolatile(E->isVolatile());

  // For __atomic_*_fetch operations, perform the operation again to
  // determine the value which was written.
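  // For example, __atomic_add_fetch must return old + val, while atomicrmw
  // only yields the old value, so the same binary operation is re-applied to
  // the result below.  __atomic_nand_fetch needs an extra 'not' on top of the
  // re-applied 'and' because the value written by atomicrmw nand is
  // ~(old & val).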
  llvm::Value *Result = RMWI;
  if (PostOp)
    Result = CGF.Builder.CreateBinOp(PostOp, RMWI, LoadVal1);
  if (E->getOp() == AtomicExpr::AO__atomic_nand_fetch)
    Result = CGF.Builder.CreateNot(Result);
  llvm::StoreInst *StoreDest = CGF.Builder.CreateStore(Result, Dest);
  StoreDest->setAlignment(Align);
}

// This function emits any expression (scalar, complex, or aggregate)
// into a temporary alloca.
static llvm::Value *
EmitValToTemp(CodeGenFunction &CGF, Expr *E) {
  llvm::Value *DeclPtr = CGF.CreateMemTemp(E->getType(), ".atomictmp");
  CGF.EmitAnyExprToMem(E, DeclPtr, E->getType().getQualifiers(),
                       /*Init*/ true);
  return DeclPtr;
}

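/// Emit an atomic expression, i.e. one of the __c11_atomic_* or __atomic_*
/// builtins.  Small, naturally aligned objects are lowered to native LLVM
/// atomic instructions; everything else goes through the __atomic_* library
/// calls.  Non-constant memory orderings are dispatched with a runtime switch.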
RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) {
  QualType AtomicTy = E->getPtr()->getType()->getPointeeType();
  QualType MemTy = AtomicTy;
  if (const AtomicType *AT = AtomicTy->getAs<AtomicType>())
    MemTy = AT->getValueType();
  CharUnits sizeChars = getContext().getTypeSizeInChars(AtomicTy);
  uint64_t Size = sizeChars.getQuantity();
  CharUnits alignChars = getContext().getTypeAlignInChars(AtomicTy);
  unsigned Align = alignChars.getQuantity();
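  // The operation can only be emitted inline if the object is as aligned as
  // it is wide and fits within the target's maximum inline atomic width;
  // otherwise fall back to the generic library calls.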
  unsigned MaxInlineWidthInBits =
    getContext().getTargetInfo().getMaxAtomicInlineWidth();
  bool UseLibcall = (Size != Align ||
                     getContext().toBits(sizeChars) > MaxInlineWidthInBits);

  llvm::Value *Ptr, *Order, *OrderFail = 0, *Val1 = 0, *Val2 = 0;
  Ptr = EmitScalarExpr(E->getPtr());

  if (E->getOp() == AtomicExpr::AO__c11_atomic_init) {
    assert(!Dest && "Init does not return a value");
    LValue LV = MakeAddrLValue(Ptr, AtomicTy, alignChars);
    switch (getEvaluationKind(E->getVal1()->getType())) {
    case TEK_Scalar:
      EmitScalarInit(EmitScalarExpr(E->getVal1()), LV);
      return RValue::get(0);
    case TEK_Complex:
      EmitComplexExprIntoLValue(E->getVal1(), LV, /*isInit*/ true);
      return RValue::get(0);
    case TEK_Aggregate: {
      AggValueSlot Slot = AggValueSlot::forLValue(LV,
                                        AggValueSlot::IsNotDestructed,
                                        AggValueSlot::DoesNotNeedGCBarriers,
                                        AggValueSlot::IsNotAliased);
      EmitAggExpr(E->getVal1(), Slot);
      return RValue::get(0);
    }
    }
    llvm_unreachable("bad evaluation kind");
  }

  Order = EmitScalarExpr(E->getOrder());

  switch (E->getOp()) {
  case AtomicExpr::AO__c11_atomic_init:
    llvm_unreachable("Already handled!");

  case AtomicExpr::AO__c11_atomic_load:
  case AtomicExpr::AO__atomic_load_n:
    break;

  case AtomicExpr::AO__atomic_load:
    Dest = EmitScalarExpr(E->getVal1());
    break;

  case AtomicExpr::AO__atomic_store:
    Val1 = EmitScalarExpr(E->getVal1());
    break;

  case AtomicExpr::AO__atomic_exchange:
    Val1 = EmitScalarExpr(E->getVal1());
    Dest = EmitScalarExpr(E->getVal2());
    break;

  case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
  case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
  case AtomicExpr::AO__atomic_compare_exchange_n:
  case AtomicExpr::AO__atomic_compare_exchange:
    Val1 = EmitScalarExpr(E->getVal1());
    if (E->getOp() == AtomicExpr::AO__atomic_compare_exchange)
      Val2 = EmitScalarExpr(E->getVal2());
    else
      Val2 = EmitValToTemp(*this, E->getVal2());
    OrderFail = EmitScalarExpr(E->getOrderFail());
    // Evaluate and discard the 'weak' argument.
    if (E->getNumSubExprs() == 6)
      EmitScalarExpr(E->getWeak());
    break;

  case AtomicExpr::AO__c11_atomic_fetch_add:
  case AtomicExpr::AO__c11_atomic_fetch_sub:
    if (MemTy->isPointerType()) {
      // For pointer arithmetic, we're required to do a bit of math:
      // adding 1 to an int* is not the same as adding 1 to a uintptr_t.
      // ... but only for the C11 builtins. The GNU builtins expect the
      // user to multiply by sizeof(T).
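      // For example, __c11_atomic_fetch_add(&p, 2, ...) on an _Atomic(int *)
      // must advance the pointer by 2 * sizeof(int) bytes, so the operand is
      // scaled by the pointee size here, whereas the equivalent GNU builtin
      // would add exactly the 2 bytes the caller passed in.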
      QualType Val1Ty = E->getVal1()->getType();
      llvm::Value *Val1Scalar = EmitScalarExpr(E->getVal1());
      CharUnits PointeeIncAmt =
        getContext().getTypeSizeInChars(MemTy->getPointeeType());
      Val1Scalar = Builder.CreateMul(Val1Scalar, CGM.getSize(PointeeIncAmt));
      Val1 = CreateMemTemp(Val1Ty, ".atomictmp");
      EmitStoreOfScalar(Val1Scalar, MakeAddrLValue(Val1, Val1Ty));
      break;
    }
    // Fall through.
  case AtomicExpr::AO__atomic_fetch_add:
  case AtomicExpr::AO__atomic_fetch_sub:
  case AtomicExpr::AO__atomic_add_fetch:
  case AtomicExpr::AO__atomic_sub_fetch:
  case AtomicExpr::AO__c11_atomic_store:
  case AtomicExpr::AO__c11_atomic_exchange:
  case AtomicExpr::AO__atomic_store_n:
  case AtomicExpr::AO__atomic_exchange_n:
  case AtomicExpr::AO__c11_atomic_fetch_and:
  case AtomicExpr::AO__c11_atomic_fetch_or:
  case AtomicExpr::AO__c11_atomic_fetch_xor:
  case AtomicExpr::AO__atomic_fetch_and:
  case AtomicExpr::AO__atomic_fetch_or:
  case AtomicExpr::AO__atomic_fetch_xor:
  case AtomicExpr::AO__atomic_fetch_nand:
  case AtomicExpr::AO__atomic_and_fetch:
  case AtomicExpr::AO__atomic_or_fetch:
  case AtomicExpr::AO__atomic_xor_fetch:
  case AtomicExpr::AO__atomic_nand_fetch:
    Val1 = EmitValToTemp(*this, E->getVal1());
    break;
  }

  if (!E->getType()->isVoidType() && !Dest)
    Dest = CreateMemTemp(E->getType(), ".atomicdst");

  // Use a library call. See: http://gcc.gnu.org/wiki/Atomic/GCCMM/LIbrary .
  if (UseLibcall) {

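    // All of the generic library functions take the object size and the
    // address of the atomic object first and the memory ordering last; only
    // the arguments in between vary per operation.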
    SmallVector<QualType, 5> Params;
    CallArgList Args;
    // Size is always the first parameter
    Args.add(RValue::get(llvm::ConstantInt::get(SizeTy, Size)),
             getContext().getSizeType());
    // Atomic address is always the second parameter
    Args.add(RValue::get(EmitCastToVoidPtr(Ptr)),
             getContext().VoidPtrTy);

    const char* LibCallName;
    QualType RetTy = getContext().VoidTy;
    switch (E->getOp()) {
    // There is only one libcall for compare and exchange, because there is
    // no optimisation benefit possible from a libcall version of a weak
    // compare and exchange.
    // bool __atomic_compare_exchange(size_t size, void *obj, void *expected,
    //                                void *desired, int success, int failure)
    case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
    case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
    case AtomicExpr::AO__atomic_compare_exchange:
    case AtomicExpr::AO__atomic_compare_exchange_n:
      LibCallName = "__atomic_compare_exchange";
      RetTy = getContext().BoolTy;
      Args.add(RValue::get(EmitCastToVoidPtr(Val1)),
               getContext().VoidPtrTy);
      Args.add(RValue::get(EmitCastToVoidPtr(Val2)),
               getContext().VoidPtrTy);
      Args.add(RValue::get(Order),
               getContext().IntTy);
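      // The success ordering was just added; swapping in the failure ordering
      // lets the common code after the switch append it as the last argument.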
      Order = OrderFail;
      break;
    // void __atomic_exchange(size_t size, void *mem, void *val, void *return,
    //                        int order)
    case AtomicExpr::AO__c11_atomic_exchange:
    case AtomicExpr::AO__atomic_exchange_n:
    case AtomicExpr::AO__atomic_exchange:
      LibCallName = "__atomic_exchange";
      Args.add(RValue::get(EmitCastToVoidPtr(Val1)),
               getContext().VoidPtrTy);
      Args.add(RValue::get(EmitCastToVoidPtr(Dest)),
               getContext().VoidPtrTy);
      break;
    // void __atomic_store(size_t size, void *mem, void *val, int order)
    case AtomicExpr::AO__c11_atomic_store:
    case AtomicExpr::AO__atomic_store:
    case AtomicExpr::AO__atomic_store_n:
      LibCallName = "__atomic_store";
      Args.add(RValue::get(EmitCastToVoidPtr(Val1)),
               getContext().VoidPtrTy);
      break;
    // void __atomic_load(size_t size, void *mem, void *return, int order)
    case AtomicExpr::AO__c11_atomic_load:
    case AtomicExpr::AO__atomic_load:
    case AtomicExpr::AO__atomic_load_n:
      LibCallName = "__atomic_load";
      Args.add(RValue::get(EmitCastToVoidPtr(Dest)),
               getContext().VoidPtrTy);
      break;
#if 0
    // These are only defined for 1-16 byte integers.  It is not clear what
    // their semantics would be on anything else...
    case AtomicExpr::Add:  LibCallName = "__atomic_fetch_add_generic"; break;
    case AtomicExpr::Sub:  LibCallName = "__atomic_fetch_sub_generic"; break;
    case AtomicExpr::And:  LibCallName = "__atomic_fetch_and_generic"; break;
    case AtomicExpr::Or:   LibCallName = "__atomic_fetch_or_generic"; break;
    case AtomicExpr::Xor:  LibCallName = "__atomic_fetch_xor_generic"; break;
#endif
    default: return EmitUnsupportedRValue(E, "atomic library call");
    }
    // order is always the last parameter
    Args.add(RValue::get(Order),
             getContext().IntTy);

    const CGFunctionInfo &FuncInfo =
        CGM.getTypes().arrangeFreeFunctionCall(RetTy, Args,
            FunctionType::ExtInfo(), RequiredArgs::All);
    llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FuncInfo);
    llvm::Constant *Func = CGM.CreateRuntimeFunction(FTy, LibCallName);
    RValue Res = EmitCall(FuncInfo, Func, ReturnValueSlot(), Args);
    if (E->isCmpXChg())
      return Res;
    if (E->getType()->isVoidType())
      return RValue::get(0);
    return convertTempToRValue(Dest, E->getType());
  }

  bool IsStore = E->getOp() == AtomicExpr::AO__c11_atomic_store ||
                 E->getOp() == AtomicExpr::AO__atomic_store ||
                 E->getOp() == AtomicExpr::AO__atomic_store_n;
  bool IsLoad = E->getOp() == AtomicExpr::AO__c11_atomic_load ||
                E->getOp() == AtomicExpr::AO__atomic_load ||
                E->getOp() == AtomicExpr::AO__atomic_load_n;

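  // Inline path: treat the memory involved as an integer of the access width
  // so it can be fed directly to LLVM's atomic instructions.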
  llvm::Type *IPtrTy =
      llvm::IntegerType::get(getLLVMContext(), Size * 8)->getPointerTo();
  llvm::Value *OrigDest = Dest;
  Ptr = Builder.CreateBitCast(Ptr, IPtrTy);
  if (Val1) Val1 = Builder.CreateBitCast(Val1, IPtrTy);
  if (Val2) Val2 = Builder.CreateBitCast(Val2, IPtrTy);
  if (Dest && !E->isCmpXChg()) Dest = Builder.CreateBitCast(Dest, IPtrTy);

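  // The easy case: the ordering folds to a constant, so a single atomic
  // operation with the corresponding LLVM ordering suffices.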
  if (isa<llvm::ConstantInt>(Order)) {
    int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
    switch (ord) {
    case 0:  // memory_order_relaxed
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
                   llvm::Monotonic);
      break;
    case 1:  // memory_order_consume
    case 2:  // memory_order_acquire
      if (IsStore)
        break; // Avoid crashing on code with undefined behavior
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
                   llvm::Acquire);
      break;
    case 3:  // memory_order_release
      if (IsLoad)
        break; // Avoid crashing on code with undefined behavior
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
                   llvm::Release);
      break;
    case 4:  // memory_order_acq_rel
      if (IsLoad || IsStore)
        break; // Avoid crashing on code with undefined behavior
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
                   llvm::AcquireRelease);
      break;
    case 5:  // memory_order_seq_cst
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
                   llvm::SequentiallyConsistent);
      break;
    default: // invalid order
      // We should not ever get here normally, but it's hard to
      // enforce that in general.
      break;
    }
    if (E->getType()->isVoidType())
      return RValue::get(0);
    return convertTempToRValue(OrigDest, E->getType());
  }

  // Long case, when Order isn't obviously constant.

  // Create all the relevant BB's
  llvm::BasicBlock *MonotonicBB = 0, *AcquireBB = 0, *ReleaseBB = 0,
                   *AcqRelBB = 0, *SeqCstBB = 0;
  MonotonicBB = createBasicBlock("monotonic", CurFn);
  if (!IsStore)
    AcquireBB = createBasicBlock("acquire", CurFn);
  if (!IsLoad)
    ReleaseBB = createBasicBlock("release", CurFn);
  if (!IsLoad && !IsStore)
    AcqRelBB = createBasicBlock("acqrel", CurFn);
  SeqCstBB = createBasicBlock("seqcst", CurFn);
  llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);

  // Create the switch for the split
  // MonotonicBB is arbitrarily chosen as the default case; in practice, this
  // doesn't matter unless someone is crazy enough to use something that
  // doesn't fold to a constant for the ordering.
  Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
  llvm::SwitchInst *SI = Builder.CreateSwitch(Order, MonotonicBB);

  // Emit all the different atomics
  Builder.SetInsertPoint(MonotonicBB);
  EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
               llvm::Monotonic);
  Builder.CreateBr(ContBB);
  if (!IsStore) {
    Builder.SetInsertPoint(AcquireBB);
    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
                 llvm::Acquire);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32(1), AcquireBB);
    SI->addCase(Builder.getInt32(2), AcquireBB);
  }
  if (!IsLoad) {
    Builder.SetInsertPoint(ReleaseBB);
    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
                 llvm::Release);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32(3), ReleaseBB);
  }
  if (!IsLoad && !IsStore) {
    Builder.SetInsertPoint(AcqRelBB);
    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
                 llvm::AcquireRelease);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32(4), AcqRelBB);
  }
  Builder.SetInsertPoint(SeqCstBB);
  EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
               llvm::SequentiallyConsistent);
  Builder.CreateBr(ContBB);
  SI->addCase(Builder.getInt32(5), SeqCstBB);

  // Cleanup and return
  Builder.SetInsertPoint(ContBB);
  if (E->getType()->isVoidType())
    return RValue::get(0);
  return convertTempToRValue(OrigDest, E->getType());
}