//===--- CGAtomic.cpp - Emit LLVM IR for atomic operations ----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the code for emitting atomic operations.
//
//===----------------------------------------------------------------------===//

#include "CodeGenFunction.h"
#include "CGCall.h"
#include "CodeGenModule.h"
#include "clang/AST/ASTContext.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Operator.h"

using namespace clang;
using namespace CodeGen;

// The ABI values for various atomic memory orderings.
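// These correspond to the C11/C++11 memory_order enumerators (relaxed = 0
// through seq_cst = 5), which the atomic builtins receive as plain integer
// arguments.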
enum AtomicOrderingKind {
  AO_ABI_memory_order_relaxed = 0,
  AO_ABI_memory_order_consume = 1,
  AO_ABI_memory_order_acquire = 2,
  AO_ABI_memory_order_release = 3,
  AO_ABI_memory_order_acq_rel = 4,
  AO_ABI_memory_order_seq_cst = 5
};

namespace {
  class AtomicInfo {
    CodeGenFunction &CGF;
    QualType AtomicTy;
    QualType ValueTy;
    uint64_t AtomicSizeInBits;
    uint64_t ValueSizeInBits;
    CharUnits AtomicAlign;
    CharUnits ValueAlign;
    CharUnits LValueAlign;
    TypeEvaluationKind EvaluationKind;
    bool UseLibcall;
  public:
    AtomicInfo(CodeGenFunction &CGF, LValue &lvalue) : CGF(CGF) {
      assert(lvalue.isSimple());

      AtomicTy = lvalue.getType();
      ValueTy = AtomicTy->castAs<AtomicType>()->getValueType();
      EvaluationKind = CGF.getEvaluationKind(ValueTy);

      ASTContext &C = CGF.getContext();

      uint64_t valueAlignInBits;
      std::tie(ValueSizeInBits, valueAlignInBits) = C.getTypeInfo(ValueTy);

      uint64_t atomicAlignInBits;
      std::tie(AtomicSizeInBits, atomicAlignInBits) = C.getTypeInfo(AtomicTy);

      assert(ValueSizeInBits <= AtomicSizeInBits);
      assert(valueAlignInBits <= atomicAlignInBits);

      AtomicAlign = C.toCharUnitsFromBits(atomicAlignInBits);
      ValueAlign = C.toCharUnitsFromBits(valueAlignInBits);
      if (lvalue.getAlignment().isZero())
        lvalue.setAlignment(AtomicAlign);

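      // Access is lock-free only when the atomic object is aligned to at
      // least its own size and its width does not exceed the target's
      // maximum inline atomic width; anything else goes through the
      // __atomic_* library calls.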
73 UseLibcall =
74 (AtomicSizeInBits > uint64_t(C.toBits(lvalue.getAlignment())) ||
75 AtomicSizeInBits > C.getTargetInfo().getMaxAtomicInlineWidth());
76 }
77
78 QualType getAtomicType() const { return AtomicTy; }
79 QualType getValueType() const { return ValueTy; }
80 CharUnits getAtomicAlignment() const { return AtomicAlign; }
81 CharUnits getValueAlignment() const { return ValueAlign; }
82 uint64_t getAtomicSizeInBits() const { return AtomicSizeInBits; }
    uint64_t getValueSizeInBits() const { return ValueSizeInBits; }
    TypeEvaluationKind getEvaluationKind() const { return EvaluationKind; }
    bool shouldUseLibcall() const { return UseLibcall; }

    /// Is the atomic size larger than the underlying value type?
    ///
    /// Note that the absence of padding does not mean that atomic
    /// objects are completely interchangeable with non-atomic
    /// objects: we might have promoted the alignment of a type
    /// without making it bigger.
    bool hasPadding() const {
      return (ValueSizeInBits != AtomicSizeInBits);
    }

    bool emitMemSetZeroIfNecessary(LValue dest) const;

    llvm::Value *getAtomicSizeValue() const {
      CharUnits size = CGF.getContext().toCharUnitsFromBits(AtomicSizeInBits);
      return CGF.CGM.getSize(size);
    }

    /// Cast the given pointer to an integer pointer suitable for
    /// atomic operations.
    llvm::Value *emitCastToAtomicIntPointer(llvm::Value *addr) const;

    /// Turn an atomic-layout object into an r-value.
    RValue convertTempToRValue(llvm::Value *addr,
                               AggValueSlot resultSlot,
                               SourceLocation loc) const;

    /// Copy an atomic r-value into atomic-layout memory.
    void emitCopyIntoMemory(RValue rvalue, LValue lvalue) const;

    /// Project an l-value down to the value field.
    LValue projectValue(LValue lvalue) const {
      llvm::Value *addr = lvalue.getAddress();
      if (hasPadding())
        addr = CGF.Builder.CreateStructGEP(addr, 0);

      return LValue::MakeAddr(addr, getValueType(), lvalue.getAlignment(),
                              CGF.getContext(), lvalue.getTBAAInfo());
    }

    /// Materialize an atomic r-value in atomic-layout memory.
    llvm::Value *materializeRValue(RValue rvalue) const;

  private:
    bool requiresMemSetZero(llvm::Type *type) const;
  };
}

static RValue emitAtomicLibcall(CodeGenFunction &CGF,
                                StringRef fnName,
                                QualType resultType,
                                CallArgList &args) {
  const CGFunctionInfo &fnInfo =
    CGF.CGM.getTypes().arrangeFreeFunctionCall(resultType, args,
            FunctionType::ExtInfo(), RequiredArgs::All);
  llvm::FunctionType *fnTy = CGF.CGM.getTypes().GetFunctionType(fnInfo);
  llvm::Constant *fn = CGF.CGM.CreateRuntimeFunction(fnTy, fnName);
  return CGF.EmitCall(fnInfo, fn, ReturnValueSlot(), args);
}
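// For example, EmitAtomicLoad and EmitAtomicStore below use this helper to
// reach the generic runtime entry points, e.g.:
//   emitAtomicLibcall(*this, "__atomic_load", getContext().VoidTy, args);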

/// Does a store of the given IR type modify the full expected width?
static bool isFullSizeType(CodeGenModule &CGM, llvm::Type *type,
                           uint64_t expectedSize) {
  return (CGM.getDataLayout().getTypeStoreSize(type) * 8 == expectedSize);
}

/// Does the atomic type require memsetting to zero before initialization?
///
/// The IR type is provided as a way of making certain queries faster.
bool AtomicInfo::requiresMemSetZero(llvm::Type *type) const {
  // If the atomic type has size padding, we definitely need a memset.
  if (hasPadding()) return true;

  // Otherwise, do some simple heuristics to try to avoid it:
  switch (getEvaluationKind()) {
  // For scalars and complexes, check whether the store size of the
  // type uses the full size.
  case TEK_Scalar:
    return !isFullSizeType(CGF.CGM, type, AtomicSizeInBits);
  case TEK_Complex:
    return !isFullSizeType(CGF.CGM, type->getStructElementType(0),
                           AtomicSizeInBits / 2);

  // Padding in structs has an undefined bit pattern.  User beware.
  case TEK_Aggregate:
    return false;
  }
  llvm_unreachable("bad evaluation kind");
}

bool AtomicInfo::emitMemSetZeroIfNecessary(LValue dest) const {
  llvm::Value *addr = dest.getAddress();
  if (!requiresMemSetZero(addr->getType()->getPointerElementType()))
    return false;

  CGF.Builder.CreateMemSet(addr, llvm::ConstantInt::get(CGF.Int8Ty, 0),
                           AtomicSizeInBits / 8,
                           dest.getAlignment().getQuantity());
  return true;
}

static void
EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, llvm::Value *Dest,
             llvm::Value *Ptr, llvm::Value *Val1, llvm::Value *Val2,
             uint64_t Size, unsigned Align, llvm::AtomicOrdering Order) {
  llvm::AtomicRMWInst::BinOp Op = llvm::AtomicRMWInst::Add;
  llvm::Instruction::BinaryOps PostOp = (llvm::Instruction::BinaryOps)0;

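  // Simple read-modify-write builtins just pick an atomicrmw opcode here
  // (plus, for the __atomic_*_fetch forms, a post-operation) and then fall
  // out of the switch to the shared emission code at the bottom.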
  switch (E->getOp()) {
  case AtomicExpr::AO__c11_atomic_init:
    llvm_unreachable("Already handled!");

  case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
  case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
  case AtomicExpr::AO__atomic_compare_exchange:
  case AtomicExpr::AO__atomic_compare_exchange_n: {
    // Note that cmpxchg doesn't support weak cmpxchg, at least at the moment.
    llvm::LoadInst *Expected = CGF.Builder.CreateLoad(Val1);
    Expected->setAlignment(Align);
    llvm::LoadInst *Desired = CGF.Builder.CreateLoad(Val2);
    Desired->setAlignment(Align);
    llvm::AtomicOrdering FailureOrder =
        llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(Order);
    llvm::AtomicCmpXchgInst *Old = CGF.Builder.CreateAtomicCmpXchg(
        Ptr, Expected, Desired, Order, FailureOrder);
    Old->setVolatile(E->isVolatile());

    // Cmp holds the result of the compare-exchange operation: true on success,
    // false on failure.
    llvm::Value *Cmp = CGF.Builder.CreateICmpEQ(Old, Expected);

    // This basic block is used to hold the store instruction if the operation
    // failed.
    llvm::BasicBlock *StoreExpectedBB =
        CGF.createBasicBlock("cmpxchg.store_expected", CGF.CurFn);

    // This basic block is the exit point of the operation, we should end up
    // here regardless of whether or not the operation succeeded.
    llvm::BasicBlock *ContinueBB =
        CGF.createBasicBlock("cmpxchg.continue", CGF.CurFn);

    // Update Expected if Expected isn't equal to Old, otherwise branch to the
    // exit point.
    CGF.Builder.CreateCondBr(Cmp, ContinueBB, StoreExpectedBB);

    CGF.Builder.SetInsertPoint(StoreExpectedBB);
    // Update the memory at Expected with Old's value.
    llvm::StoreInst *StoreExpected = CGF.Builder.CreateStore(Old, Val1);
    StoreExpected->setAlignment(Align);
    // Finally, branch to the exit point.
    CGF.Builder.CreateBr(ContinueBB);

    CGF.Builder.SetInsertPoint(ContinueBB);
    // Update the memory at Dest with Cmp's value.
    CGF.EmitStoreOfScalar(Cmp, CGF.MakeAddrLValue(Dest, E->getType()));
    return;
  }

  case AtomicExpr::AO__c11_atomic_load:
  case AtomicExpr::AO__atomic_load_n:
  case AtomicExpr::AO__atomic_load: {
    llvm::LoadInst *Load = CGF.Builder.CreateLoad(Ptr);
    Load->setAtomic(Order);
    Load->setAlignment(Size);
    Load->setVolatile(E->isVolatile());
    llvm::StoreInst *StoreDest = CGF.Builder.CreateStore(Load, Dest);
    StoreDest->setAlignment(Align);
    return;
  }

  case AtomicExpr::AO__c11_atomic_store:
  case AtomicExpr::AO__atomic_store:
  case AtomicExpr::AO__atomic_store_n: {
    assert(!Dest && "Store does not return a value");
    llvm::LoadInst *LoadVal1 = CGF.Builder.CreateLoad(Val1);
    LoadVal1->setAlignment(Align);
    llvm::StoreInst *Store = CGF.Builder.CreateStore(LoadVal1, Ptr);
    Store->setAtomic(Order);
    Store->setAlignment(Size);
    Store->setVolatile(E->isVolatile());
    return;
  }

  case AtomicExpr::AO__c11_atomic_exchange:
  case AtomicExpr::AO__atomic_exchange_n:
  case AtomicExpr::AO__atomic_exchange:
    Op = llvm::AtomicRMWInst::Xchg;
    break;

  case AtomicExpr::AO__atomic_add_fetch:
    PostOp = llvm::Instruction::Add;
    // Fall through.
  case AtomicExpr::AO__c11_atomic_fetch_add:
  case AtomicExpr::AO__atomic_fetch_add:
    Op = llvm::AtomicRMWInst::Add;
    break;

  case AtomicExpr::AO__atomic_sub_fetch:
    PostOp = llvm::Instruction::Sub;
    // Fall through.
  case AtomicExpr::AO__c11_atomic_fetch_sub:
  case AtomicExpr::AO__atomic_fetch_sub:
    Op = llvm::AtomicRMWInst::Sub;
    break;

  case AtomicExpr::AO__atomic_and_fetch:
    PostOp = llvm::Instruction::And;
    // Fall through.
  case AtomicExpr::AO__c11_atomic_fetch_and:
  case AtomicExpr::AO__atomic_fetch_and:
    Op = llvm::AtomicRMWInst::And;
    break;

  case AtomicExpr::AO__atomic_or_fetch:
    PostOp = llvm::Instruction::Or;
    // Fall through.
  case AtomicExpr::AO__c11_atomic_fetch_or:
  case AtomicExpr::AO__atomic_fetch_or:
    Op = llvm::AtomicRMWInst::Or;
    break;

  case AtomicExpr::AO__atomic_xor_fetch:
    PostOp = llvm::Instruction::Xor;
    // Fall through.
  case AtomicExpr::AO__c11_atomic_fetch_xor:
  case AtomicExpr::AO__atomic_fetch_xor:
    Op = llvm::AtomicRMWInst::Xor;
    break;

  case AtomicExpr::AO__atomic_nand_fetch:
    PostOp = llvm::Instruction::And;
    // Fall through.
  case AtomicExpr::AO__atomic_fetch_nand:
    Op = llvm::AtomicRMWInst::Nand;
    break;
  }

  llvm::LoadInst *LoadVal1 = CGF.Builder.CreateLoad(Val1);
  LoadVal1->setAlignment(Align);
  llvm::AtomicRMWInst *RMWI =
      CGF.Builder.CreateAtomicRMW(Op, Ptr, LoadVal1, Order);
  RMWI->setVolatile(E->isVolatile());

  // For __atomic_*_fetch operations, perform the operation again to
  // determine the value which was written.
  llvm::Value *Result = RMWI;
  if (PostOp)
    Result = CGF.Builder.CreateBinOp(PostOp, RMWI, LoadVal1);
  if (E->getOp() == AtomicExpr::AO__atomic_nand_fetch)
    Result = CGF.Builder.CreateNot(Result);
  llvm::StoreInst *StoreDest = CGF.Builder.CreateStore(Result, Dest);
  StoreDest->setAlignment(Align);
}

// This function emits any expression (scalar, complex, or aggregate)
// into a temporary alloca.
static llvm::Value *
EmitValToTemp(CodeGenFunction &CGF, Expr *E) {
  llvm::Value *DeclPtr = CGF.CreateMemTemp(E->getType(), ".atomictmp");
  CGF.EmitAnyExprToMem(E, DeclPtr, E->getType().getQualifiers(),
                       /*Init*/ true);
  return DeclPtr;
}

static void
AddDirectArgument(CodeGenFunction &CGF, CallArgList &Args,
                  bool UseOptimizedLibcall, llvm::Value *Val, QualType ValTy,
                  SourceLocation Loc) {
  if (UseOptimizedLibcall) {
    // Load value and pass it to the function directly.
    unsigned Align = CGF.getContext().getTypeAlignInChars(ValTy).getQuantity();
    Val = CGF.EmitLoadOfScalar(Val, false, Align, ValTy, Loc);
    Args.add(RValue::get(Val), ValTy);
  } else {
    // Non-optimized functions always take a reference.
    Args.add(RValue::get(CGF.EmitCastToVoidPtr(Val)),
             CGF.getContext().VoidPtrTy);
  }
}

RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) {
  QualType AtomicTy = E->getPtr()->getType()->getPointeeType();
  QualType MemTy = AtomicTy;
  if (const AtomicType *AT = AtomicTy->getAs<AtomicType>())
    MemTy = AT->getValueType();
  CharUnits sizeChars = getContext().getTypeSizeInChars(AtomicTy);
  uint64_t Size = sizeChars.getQuantity();
  CharUnits alignChars = getContext().getTypeAlignInChars(AtomicTy);
  unsigned Align = alignChars.getQuantity();
  unsigned MaxInlineWidthInBits =
    getTarget().getMaxAtomicInlineWidth();
  bool UseLibcall = (Size != Align ||
                     getContext().toBits(sizeChars) > MaxInlineWidthInBits);

  llvm::Value *Ptr, *Order, *OrderFail = 0, *Val1 = 0, *Val2 = 0;
  Ptr = EmitScalarExpr(E->getPtr());

  if (E->getOp() == AtomicExpr::AO__c11_atomic_init) {
    assert(!Dest && "Init does not return a value");
    LValue lvalue = LValue::MakeAddr(Ptr, AtomicTy, alignChars, getContext());
    EmitAtomicInit(E->getVal1(), lvalue);
    return RValue::get(0);
  }

  Order = EmitScalarExpr(E->getOrder());

  switch (E->getOp()) {
  case AtomicExpr::AO__c11_atomic_init:
    llvm_unreachable("Already handled!");

  case AtomicExpr::AO__c11_atomic_load:
  case AtomicExpr::AO__atomic_load_n:
    break;

  case AtomicExpr::AO__atomic_load:
    Dest = EmitScalarExpr(E->getVal1());
    break;

  case AtomicExpr::AO__atomic_store:
    Val1 = EmitScalarExpr(E->getVal1());
    break;

  case AtomicExpr::AO__atomic_exchange:
    Val1 = EmitScalarExpr(E->getVal1());
    Dest = EmitScalarExpr(E->getVal2());
    break;

  case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
  case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
  case AtomicExpr::AO__atomic_compare_exchange_n:
  case AtomicExpr::AO__atomic_compare_exchange:
    Val1 = EmitScalarExpr(E->getVal1());
    if (E->getOp() == AtomicExpr::AO__atomic_compare_exchange)
      Val2 = EmitScalarExpr(E->getVal2());
    else
      Val2 = EmitValToTemp(*this, E->getVal2());
    OrderFail = EmitScalarExpr(E->getOrderFail());
    // Evaluate and discard the 'weak' argument.
    if (E->getNumSubExprs() == 6)
      EmitScalarExpr(E->getWeak());
    break;

  case AtomicExpr::AO__c11_atomic_fetch_add:
  case AtomicExpr::AO__c11_atomic_fetch_sub:
    if (MemTy->isPointerType()) {
      // For pointer arithmetic, we're required to do a bit of math:
      // adding 1 to an int* is not the same as adding 1 to a uintptr_t.
      // ... but only for the C11 builtins. The GNU builtins expect the
      // user to multiply by sizeof(T).
      QualType Val1Ty = E->getVal1()->getType();
      llvm::Value *Val1Scalar = EmitScalarExpr(E->getVal1());
      CharUnits PointeeIncAmt =
          getContext().getTypeSizeInChars(MemTy->getPointeeType());
      Val1Scalar = Builder.CreateMul(Val1Scalar, CGM.getSize(PointeeIncAmt));
      Val1 = CreateMemTemp(Val1Ty, ".atomictmp");
      EmitStoreOfScalar(Val1Scalar, MakeAddrLValue(Val1, Val1Ty));
      break;
    }
    // Fall through.
  case AtomicExpr::AO__atomic_fetch_add:
  case AtomicExpr::AO__atomic_fetch_sub:
  case AtomicExpr::AO__atomic_add_fetch:
  case AtomicExpr::AO__atomic_sub_fetch:
  case AtomicExpr::AO__c11_atomic_store:
  case AtomicExpr::AO__c11_atomic_exchange:
  case AtomicExpr::AO__atomic_store_n:
  case AtomicExpr::AO__atomic_exchange_n:
  case AtomicExpr::AO__c11_atomic_fetch_and:
  case AtomicExpr::AO__c11_atomic_fetch_or:
  case AtomicExpr::AO__c11_atomic_fetch_xor:
  case AtomicExpr::AO__atomic_fetch_and:
  case AtomicExpr::AO__atomic_fetch_or:
  case AtomicExpr::AO__atomic_fetch_xor:
  case AtomicExpr::AO__atomic_fetch_nand:
  case AtomicExpr::AO__atomic_and_fetch:
  case AtomicExpr::AO__atomic_or_fetch:
  case AtomicExpr::AO__atomic_xor_fetch:
  case AtomicExpr::AO__atomic_nand_fetch:
    Val1 = EmitValToTemp(*this, E->getVal1());
    break;
  }

  if (!E->getType()->isVoidType() && !Dest)
    Dest = CreateMemTemp(E->getType(), ".atomicdst");

  // Use a library call.  See: http://gcc.gnu.org/wiki/Atomic/GCCMM/LIbrary .
  if (UseLibcall) {
    bool UseOptimizedLibcall = false;
    switch (E->getOp()) {
    case AtomicExpr::AO__c11_atomic_fetch_add:
    case AtomicExpr::AO__atomic_fetch_add:
    case AtomicExpr::AO__c11_atomic_fetch_and:
    case AtomicExpr::AO__atomic_fetch_and:
    case AtomicExpr::AO__c11_atomic_fetch_or:
    case AtomicExpr::AO__atomic_fetch_or:
    case AtomicExpr::AO__c11_atomic_fetch_sub:
    case AtomicExpr::AO__atomic_fetch_sub:
    case AtomicExpr::AO__c11_atomic_fetch_xor:
    case AtomicExpr::AO__atomic_fetch_xor:
      // For these, only library calls for certain sizes exist.
      UseOptimizedLibcall = true;
      break;
    default:
      // Only use optimized library calls for sizes for which they exist.
      if (Size == 1 || Size == 2 || Size == 4 || Size == 8)
        UseOptimizedLibcall = true;
      break;
    }
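    // For example, a 4-byte fetch-add is lowered to a call to
    // __atomic_fetch_add_4(mem, val, order), while an oversized load falls
    // back to the generic __atomic_load(size, mem, return, order) form.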

    CallArgList Args;
    if (!UseOptimizedLibcall) {
      // For non-optimized library calls, the size is the first parameter
      Args.add(RValue::get(llvm::ConstantInt::get(SizeTy, Size)),
               getContext().getSizeType());
    }
    // Atomic address is the first or second parameter
    Args.add(RValue::get(EmitCastToVoidPtr(Ptr)), getContext().VoidPtrTy);

    std::string LibCallName;
    QualType RetTy;
    bool HaveRetTy = false;
    switch (E->getOp()) {
    // There is only one libcall for compare and exchange, because there is no
    // optimisation benefit possible from a libcall version of a weak compare
    // and exchange.
    // bool __atomic_compare_exchange(size_t size, void *mem, void *expected,
    //                                void *desired, int success, int failure)
    // bool __atomic_compare_exchange_N(T *mem, T *expected, T desired,
    //                                  int success, int failure)
    case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
    case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
    case AtomicExpr::AO__atomic_compare_exchange:
    case AtomicExpr::AO__atomic_compare_exchange_n:
      LibCallName = "__atomic_compare_exchange";
      RetTy = getContext().BoolTy;
      HaveRetTy = true;
      Args.add(RValue::get(EmitCastToVoidPtr(Val1)), getContext().VoidPtrTy);
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val2, MemTy,
                        E->getExprLoc());
      Args.add(RValue::get(Order), getContext().IntTy);
      Order = OrderFail;
      break;
    // void __atomic_exchange(size_t size, void *mem, void *val, void *return,
    //                        int order)
    // T __atomic_exchange_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_exchange:
    case AtomicExpr::AO__atomic_exchange_n:
    case AtomicExpr::AO__atomic_exchange:
      LibCallName = "__atomic_exchange";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
                        E->getExprLoc());
      break;
    // void __atomic_store(size_t size, void *mem, void *val, int order)
    // void __atomic_store_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_store:
    case AtomicExpr::AO__atomic_store:
    case AtomicExpr::AO__atomic_store_n:
      LibCallName = "__atomic_store";
      RetTy = getContext().VoidTy;
      HaveRetTy = true;
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
                        E->getExprLoc());
      break;
    // void __atomic_load(size_t size, void *mem, void *return, int order)
    // T __atomic_load_N(T *mem, int order)
    case AtomicExpr::AO__c11_atomic_load:
    case AtomicExpr::AO__atomic_load:
    case AtomicExpr::AO__atomic_load_n:
      LibCallName = "__atomic_load";
      break;
    // T __atomic_fetch_add_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_fetch_add:
    case AtomicExpr::AO__atomic_fetch_add:
      LibCallName = "__atomic_fetch_add";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
                        E->getExprLoc());
      break;
    // T __atomic_fetch_and_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_fetch_and:
    case AtomicExpr::AO__atomic_fetch_and:
      LibCallName = "__atomic_fetch_and";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
                        E->getExprLoc());
      break;
    // T __atomic_fetch_or_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_fetch_or:
    case AtomicExpr::AO__atomic_fetch_or:
      LibCallName = "__atomic_fetch_or";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
                        E->getExprLoc());
      break;
    // T __atomic_fetch_sub_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_fetch_sub:
    case AtomicExpr::AO__atomic_fetch_sub:
      LibCallName = "__atomic_fetch_sub";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
                        E->getExprLoc());
      break;
    // T __atomic_fetch_xor_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_fetch_xor:
    case AtomicExpr::AO__atomic_fetch_xor:
      LibCallName = "__atomic_fetch_xor";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
                        E->getExprLoc());
      break;
    default: return EmitUnsupportedRValue(E, "atomic library call");
    }

    // Optimized functions have the size in their name.
    if (UseOptimizedLibcall)
      LibCallName += "_" + llvm::utostr(Size);
    // By default, assume we return a value of the atomic type.
    if (!HaveRetTy) {
      if (UseOptimizedLibcall) {
        // Value is returned directly.
        RetTy = MemTy;
      } else {
        // Value is returned through parameter before the order.
        RetTy = getContext().VoidTy;
        Args.add(RValue::get(EmitCastToVoidPtr(Dest)),
                 getContext().VoidPtrTy);
      }
    }
    // order is always the last parameter
    Args.add(RValue::get(Order),
             getContext().IntTy);

    const CGFunctionInfo &FuncInfo =
        CGM.getTypes().arrangeFreeFunctionCall(RetTy, Args,
            FunctionType::ExtInfo(), RequiredArgs::All);
    llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FuncInfo);
    llvm::Constant *Func = CGM.CreateRuntimeFunction(FTy, LibCallName);
    RValue Res = EmitCall(FuncInfo, Func, ReturnValueSlot(), Args);
    if (!RetTy->isVoidType())
      return Res;
    if (E->getType()->isVoidType())
      return RValue::get(0);
    return convertTempToRValue(Dest, E->getType(), E->getExprLoc());
  }

  bool IsStore = E->getOp() == AtomicExpr::AO__c11_atomic_store ||
                 E->getOp() == AtomicExpr::AO__atomic_store ||
                 E->getOp() == AtomicExpr::AO__atomic_store_n;
  bool IsLoad = E->getOp() == AtomicExpr::AO__c11_atomic_load ||
                E->getOp() == AtomicExpr::AO__atomic_load ||
                E->getOp() == AtomicExpr::AO__atomic_load_n;

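  // The inline (lock-free) path works on the object's bit pattern, so cast
  // every pointer involved to a pointer to an integer of the access width.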
  llvm::Type *IPtrTy =
      llvm::IntegerType::get(getLLVMContext(), Size * 8)->getPointerTo();
  llvm::Value *OrigDest = Dest;
  Ptr = Builder.CreateBitCast(Ptr, IPtrTy);
  if (Val1) Val1 = Builder.CreateBitCast(Val1, IPtrTy);
  if (Val2) Val2 = Builder.CreateBitCast(Val2, IPtrTy);
  if (Dest && !E->isCmpXChg()) Dest = Builder.CreateBitCast(Dest, IPtrTy);

  if (isa<llvm::ConstantInt>(Order)) {
    int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
    switch (ord) {
    case AO_ABI_memory_order_relaxed:
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
                   llvm::Monotonic);
      break;
    case AO_ABI_memory_order_consume:
    case AO_ABI_memory_order_acquire:
      if (IsStore)
        break; // Avoid crashing on code with undefined behavior
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
                   llvm::Acquire);
      break;
    case AO_ABI_memory_order_release:
      if (IsLoad)
        break; // Avoid crashing on code with undefined behavior
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
                   llvm::Release);
      break;
    case AO_ABI_memory_order_acq_rel:
      if (IsLoad || IsStore)
        break; // Avoid crashing on code with undefined behavior
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
                   llvm::AcquireRelease);
      break;
    case AO_ABI_memory_order_seq_cst:
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
                   llvm::SequentiallyConsistent);
      break;
    default: // invalid order
      // We should not ever get here normally, but it's hard to
      // enforce that in general.
      break;
    }
    if (E->getType()->isVoidType())
      return RValue::get(0);
    return convertTempToRValue(OrigDest, E->getType(), E->getExprLoc());
  }

  // Long case, when Order isn't obviously constant.
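  // The ordering is a runtime value here, so emit one copy of the operation
  // for each ordering the builtin permits and dispatch with a switch over
  // the C11 memory_order value.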

  // Create all the relevant BB's
  llvm::BasicBlock *MonotonicBB = 0, *AcquireBB = 0, *ReleaseBB = 0,
                   *AcqRelBB = 0, *SeqCstBB = 0;
  MonotonicBB = createBasicBlock("monotonic", CurFn);
  if (!IsStore)
    AcquireBB = createBasicBlock("acquire", CurFn);
  if (!IsLoad)
    ReleaseBB = createBasicBlock("release", CurFn);
  if (!IsLoad && !IsStore)
    AcqRelBB = createBasicBlock("acqrel", CurFn);
  SeqCstBB = createBasicBlock("seqcst", CurFn);
  llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);

  // Create the switch for the split
  // MonotonicBB is arbitrarily chosen as the default case; in practice, this
  // doesn't matter unless someone is crazy enough to use something that
  // doesn't fold to a constant for the ordering.
  Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
  llvm::SwitchInst *SI = Builder.CreateSwitch(Order, MonotonicBB);

  // Emit all the different atomics
  Builder.SetInsertPoint(MonotonicBB);
  EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
               llvm::Monotonic);
  Builder.CreateBr(ContBB);
  if (!IsStore) {
    Builder.SetInsertPoint(AcquireBB);
    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
                 llvm::Acquire);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32(1), AcquireBB);
    SI->addCase(Builder.getInt32(2), AcquireBB);
  }
  if (!IsLoad) {
    Builder.SetInsertPoint(ReleaseBB);
    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
                 llvm::Release);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32(3), ReleaseBB);
  }
  if (!IsLoad && !IsStore) {
    Builder.SetInsertPoint(AcqRelBB);
    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
                 llvm::AcquireRelease);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32(4), AcqRelBB);
  }
  Builder.SetInsertPoint(SeqCstBB);
  EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
               llvm::SequentiallyConsistent);
  Builder.CreateBr(ContBB);
  SI->addCase(Builder.getInt32(5), SeqCstBB);

  // Cleanup and return
  Builder.SetInsertPoint(ContBB);
  if (E->getType()->isVoidType())
    return RValue::get(0);
  return convertTempToRValue(OrigDest, E->getType(), E->getExprLoc());
}

llvm::Value *AtomicInfo::emitCastToAtomicIntPointer(llvm::Value *addr) const {
  unsigned addrspace =
    cast<llvm::PointerType>(addr->getType())->getAddressSpace();
  llvm::IntegerType *ty =
    llvm::IntegerType::get(CGF.getLLVMContext(), AtomicSizeInBits);
  return CGF.Builder.CreateBitCast(addr, ty->getPointerTo(addrspace));
}

RValue AtomicInfo::convertTempToRValue(llvm::Value *addr,
                                       AggValueSlot resultSlot,
                                       SourceLocation loc) const {
  if (EvaluationKind == TEK_Aggregate)
    return resultSlot.asRValue();

  // Drill into the padding structure if we have one.
  if (hasPadding())
    addr = CGF.Builder.CreateStructGEP(addr, 0);

  // Otherwise, just convert the temporary to an r-value using the
  // normal conversion routine.
  return CGF.convertTempToRValue(addr, getValueType(), loc);
}

/// Emit a load from an l-value of atomic type.  Note that the r-value
/// we produce is an r-value of the atomic *value* type.
RValue CodeGenFunction::EmitAtomicLoad(LValue src, SourceLocation loc,
                                       AggValueSlot resultSlot) {
  AtomicInfo atomics(*this, src);

  // Check whether we should use a library call.
  if (atomics.shouldUseLibcall()) {
    llvm::Value *tempAddr;
    if (!resultSlot.isIgnored()) {
      assert(atomics.getEvaluationKind() == TEK_Aggregate);
      tempAddr = resultSlot.getAddr();
    } else {
      tempAddr = CreateMemTemp(atomics.getAtomicType(), "atomic-load-temp");
    }

    // void __atomic_load(size_t size, void *mem, void *return, int order);
    CallArgList args;
    args.add(RValue::get(atomics.getAtomicSizeValue()),
             getContext().getSizeType());
    args.add(RValue::get(EmitCastToVoidPtr(src.getAddress())),
             getContext().VoidPtrTy);
    args.add(RValue::get(EmitCastToVoidPtr(tempAddr)),
             getContext().VoidPtrTy);
    args.add(RValue::get(llvm::ConstantInt::get(IntTy,
                                                AO_ABI_memory_order_seq_cst)),
             getContext().IntTy);
    emitAtomicLibcall(*this, "__atomic_load", getContext().VoidTy, args);

    // Produce the r-value.
    return atomics.convertTempToRValue(tempAddr, resultSlot, loc);
  }

  // Okay, we're doing this natively.
  llvm::Value *addr = atomics.emitCastToAtomicIntPointer(src.getAddress());
  llvm::LoadInst *load = Builder.CreateLoad(addr, "atomic-load");
  load->setAtomic(llvm::SequentiallyConsistent);

  // Other decoration.
  load->setAlignment(src.getAlignment().getQuantity());
  if (src.isVolatileQualified())
    load->setVolatile(true);
  if (src.getTBAAInfo())
    CGM.DecorateInstruction(load, src.getTBAAInfo());

  // Okay, turn that back into the original value type.
  QualType valueType = atomics.getValueType();
  llvm::Value *result = load;

  // If we're ignoring an aggregate return, don't do anything.
  if (atomics.getEvaluationKind() == TEK_Aggregate && resultSlot.isIgnored())
    return RValue::getAggregate(0, false);

  // The easiest way to do this is to go through memory, but we
  // try not to in some easy cases.
  if (atomics.getEvaluationKind() == TEK_Scalar && !atomics.hasPadding()) {
    llvm::Type *resultTy = CGM.getTypes().ConvertTypeForMem(valueType);
    if (isa<llvm::IntegerType>(resultTy)) {
      assert(result->getType() == resultTy);
      result = EmitFromMemory(result, valueType);
    } else if (isa<llvm::PointerType>(resultTy)) {
      result = Builder.CreateIntToPtr(result, resultTy);
    } else {
      result = Builder.CreateBitCast(result, resultTy);
    }
    return RValue::get(result);
  }

  // Create a temporary.  This needs to be big enough to hold the
  // atomic integer.
  llvm::Value *temp;
  bool tempIsVolatile = false;
  CharUnits tempAlignment;
  if (atomics.getEvaluationKind() == TEK_Aggregate) {
    assert(!resultSlot.isIgnored());
    temp = resultSlot.getAddr();
    tempAlignment = atomics.getValueAlignment();
    tempIsVolatile = resultSlot.isVolatile();
  } else {
    temp = CreateMemTemp(atomics.getAtomicType(), "atomic-load-temp");
    tempAlignment = atomics.getAtomicAlignment();
  }

  // Slam the integer into the temporary.
  llvm::Value *castTemp = atomics.emitCastToAtomicIntPointer(temp);
  Builder.CreateAlignedStore(result, castTemp, tempAlignment.getQuantity())
    ->setVolatile(tempIsVolatile);

  return atomics.convertTempToRValue(temp, resultSlot, loc);
}

/// Copy an r-value into memory as part of storing to an atomic type.
/// This needs to create a bit-pattern suitable for atomic operations.
void AtomicInfo::emitCopyIntoMemory(RValue rvalue, LValue dest) const {
  // If we have an r-value, the rvalue should be of the atomic type,
  // which means that the caller is responsible for having zeroed
  // any padding.  Just do an aggregate copy of that type.
  if (rvalue.isAggregate()) {
    CGF.EmitAggregateCopy(dest.getAddress(),
                          rvalue.getAggregateAddr(),
                          getAtomicType(),
                          (rvalue.isVolatileQualified()
                           || dest.isVolatileQualified()),
                          dest.getAlignment());
    return;
  }

  // Okay, otherwise we're copying stuff.

  // Zero out the buffer if necessary.
  emitMemSetZeroIfNecessary(dest);

  // Drill past the padding if present.
  dest = projectValue(dest);

  // Okay, store the rvalue in.
  if (rvalue.isScalar()) {
    CGF.EmitStoreOfScalar(rvalue.getScalarVal(), dest, /*init*/ true);
  } else {
    CGF.EmitStoreOfComplex(rvalue.getComplexVal(), dest, /*init*/ true);
  }
}

/// Materialize an r-value into memory for the purposes of storing it
/// to an atomic type.
llvm::Value *AtomicInfo::materializeRValue(RValue rvalue) const {
  // Aggregate r-values are already in memory, and EmitAtomicStore
  // requires them to be values of the atomic type.
  if (rvalue.isAggregate())
    return rvalue.getAggregateAddr();

  // Otherwise, make a temporary and materialize into it.
  llvm::Value *temp = CGF.CreateMemTemp(getAtomicType(), "atomic-store-temp");
  LValue tempLV = CGF.MakeAddrLValue(temp, getAtomicType(),
                                     getAtomicAlignment());
  emitCopyIntoMemory(rvalue, tempLV);
  return temp;
}

/// Emit a store to an l-value of atomic type.
///
/// Note that the r-value is expected to be an r-value *of the atomic
/// type*; this means that for aggregate r-values, it should include
/// storage for any padding that was necessary.
void CodeGenFunction::EmitAtomicStore(RValue rvalue, LValue dest, bool isInit) {
  // If this is an aggregate r-value, it should agree in type except
  // maybe for address-space qualification.
  assert(!rvalue.isAggregate() ||
         rvalue.getAggregateAddr()->getType()->getPointerElementType()
           == dest.getAddress()->getType()->getPointerElementType());

  AtomicInfo atomics(*this, dest);

  // If this is an initialization, just put the value there normally.
  if (isInit) {
    atomics.emitCopyIntoMemory(rvalue, dest);
    return;
  }

  // Check whether we should use a library call.
  if (atomics.shouldUseLibcall()) {
    // Produce a source address.
    llvm::Value *srcAddr = atomics.materializeRValue(rvalue);

    // void __atomic_store(size_t size, void *mem, void *val, int order)
    CallArgList args;
    args.add(RValue::get(atomics.getAtomicSizeValue()),
             getContext().getSizeType());
    args.add(RValue::get(EmitCastToVoidPtr(dest.getAddress())),
             getContext().VoidPtrTy);
    args.add(RValue::get(EmitCastToVoidPtr(srcAddr)),
             getContext().VoidPtrTy);
    args.add(RValue::get(llvm::ConstantInt::get(IntTy,
                                                AO_ABI_memory_order_seq_cst)),
             getContext().IntTy);
    emitAtomicLibcall(*this, "__atomic_store", getContext().VoidTy, args);
    return;
  }

  // Okay, we're doing this natively.
  llvm::Value *intValue;

  // If we've got a scalar value of the right size, try to avoid going
  // through memory.
  if (rvalue.isScalar() && !atomics.hasPadding()) {
    llvm::Value *value = rvalue.getScalarVal();
    if (isa<llvm::IntegerType>(value->getType())) {
      intValue = value;
    } else {
      llvm::IntegerType *inputIntTy =
        llvm::IntegerType::get(getLLVMContext(), atomics.getValueSizeInBits());
      if (isa<llvm::PointerType>(value->getType())) {
        intValue = Builder.CreatePtrToInt(value, inputIntTy);
      } else {
        intValue = Builder.CreateBitCast(value, inputIntTy);
      }
    }

  // Otherwise, we need to go through memory.
  } else {
    // Put the r-value in memory.
    llvm::Value *addr = atomics.materializeRValue(rvalue);

    // Cast the temporary to the atomic int type and pull a value out.
    addr = atomics.emitCastToAtomicIntPointer(addr);
    intValue = Builder.CreateAlignedLoad(addr,
                                 atomics.getAtomicAlignment().getQuantity());
  }

  // Do the atomic store.
  llvm::Value *addr = atomics.emitCastToAtomicIntPointer(dest.getAddress());
  llvm::StoreInst *store = Builder.CreateStore(intValue, addr);

  // Initializations don't need to be atomic.
  if (!isInit) store->setAtomic(llvm::SequentiallyConsistent);

  // Other decoration.
  store->setAlignment(dest.getAlignment().getQuantity());
  if (dest.isVolatileQualified())
    store->setVolatile(true);
  if (dest.getTBAAInfo())
    CGM.DecorateInstruction(store, dest.getTBAAInfo());
}

void CodeGenFunction::EmitAtomicInit(Expr *init, LValue dest) {
  AtomicInfo atomics(*this, dest);

  switch (atomics.getEvaluationKind()) {
  case TEK_Scalar: {
    llvm::Value *value = EmitScalarExpr(init);
    atomics.emitCopyIntoMemory(RValue::get(value), dest);
    return;
  }

  case TEK_Complex: {
    ComplexPairTy value = EmitComplexExpr(init);
    atomics.emitCopyIntoMemory(RValue::getComplex(value), dest);
    return;
  }

  case TEK_Aggregate: {
    // Fix up the destination if the initializer isn't an expression
    // of atomic type.
    bool Zeroed = false;
    if (!init->getType()->isAtomicType()) {
      Zeroed = atomics.emitMemSetZeroIfNecessary(dest);
      dest = atomics.projectValue(dest);
    }

    // Evaluate the expression directly into the destination.
    AggValueSlot slot = AggValueSlot::forLValue(dest,
                                        AggValueSlot::IsNotDestructed,
                                        AggValueSlot::DoesNotNeedGCBarriers,
                                        AggValueSlot::IsNotAliased,
                                        Zeroed ? AggValueSlot::IsZeroed :
                                                 AggValueSlot::IsNotZeroed);

    EmitAggExpr(init, slot);
    return;
  }
  }
  llvm_unreachable("bad evaluation kind");
}