//===--- CGAtomic.cpp - Emit LLVM IR for atomic operations ----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the code for emitting atomic operations.
//
//===----------------------------------------------------------------------===//

#include "CodeGenFunction.h"
#include "CGCall.h"
#include "CodeGenModule.h"
#include "clang/AST/ASTContext.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Operator.h"

using namespace clang;
using namespace CodeGen;

// The ABI values for various atomic memory orderings.
enum AtomicOrderingKind {
  AO_ABI_memory_order_relaxed = 0,
  AO_ABI_memory_order_consume = 1,
  AO_ABI_memory_order_acquire = 2,
  AO_ABI_memory_order_release = 3,
  AO_ABI_memory_order_acq_rel = 4,
  AO_ABI_memory_order_seq_cst = 5
};
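// Illustrative note: these mirror the C11/C++11 memory_order enumerators as
// the frontend passes them, so e.g. __c11_atomic_load(p, memory_order_acquire)
// reaches the code below with a constant order value of 2.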

namespace {
  class AtomicInfo {
    CodeGenFunction &CGF;
    QualType AtomicTy;
    QualType ValueTy;
    uint64_t AtomicSizeInBits;
    uint64_t ValueSizeInBits;
    CharUnits AtomicAlign;
    CharUnits ValueAlign;
    CharUnits LValueAlign;
    TypeEvaluationKind EvaluationKind;
    bool UseLibcall;
  public:
    AtomicInfo(CodeGenFunction &CGF, LValue &lvalue) : CGF(CGF) {
      assert(lvalue.isSimple());

      AtomicTy = lvalue.getType();
      ValueTy = AtomicTy->castAs<AtomicType>()->getValueType();
      EvaluationKind = CGF.getEvaluationKind(ValueTy);

      ASTContext &C = CGF.getContext();

      uint64_t valueAlignInBits;
      std::tie(ValueSizeInBits, valueAlignInBits) = C.getTypeInfo(ValueTy);

      uint64_t atomicAlignInBits;
      std::tie(AtomicSizeInBits, atomicAlignInBits) = C.getTypeInfo(AtomicTy);

      assert(ValueSizeInBits <= AtomicSizeInBits);
      assert(valueAlignInBits <= atomicAlignInBits);

      AtomicAlign = C.toCharUnitsFromBits(atomicAlignInBits);
      ValueAlign = C.toCharUnitsFromBits(valueAlignInBits);
      if (lvalue.getAlignment().isZero())
        lvalue.setAlignment(AtomicAlign);

      UseLibcall =
        (AtomicSizeInBits > uint64_t(C.toBits(lvalue.getAlignment())) ||
         AtomicSizeInBits > C.getTargetInfo().getMaxAtomicInlineWidth());
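      // Illustrative example: on a typical 64-bit target a fully aligned
      // _Atomic(int) stays on the inline path, while an atomic object that is
      // under-aligned or larger than the target's maximum inline atomic width
      // is routed through the libatomic calls instead.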
    }

    QualType getAtomicType() const { return AtomicTy; }
    QualType getValueType() const { return ValueTy; }
    CharUnits getAtomicAlignment() const { return AtomicAlign; }
    CharUnits getValueAlignment() const { return ValueAlign; }
    uint64_t getAtomicSizeInBits() const { return AtomicSizeInBits; }
    uint64_t getValueSizeInBits() const { return ValueSizeInBits; }
    TypeEvaluationKind getEvaluationKind() const { return EvaluationKind; }
    bool shouldUseLibcall() const { return UseLibcall; }

    /// Is the atomic size larger than the underlying value type?
    ///
    /// Note that the absence of padding does not mean that atomic
    /// objects are completely interchangeable with non-atomic
    /// objects: we might have promoted the alignment of a type
    /// without making it bigger.
    bool hasPadding() const {
      return (ValueSizeInBits != AtomicSizeInBits);
    }

    bool emitMemSetZeroIfNecessary(LValue dest) const;

    llvm::Value *getAtomicSizeValue() const {
      CharUnits size = CGF.getContext().toCharUnitsFromBits(AtomicSizeInBits);
      return CGF.CGM.getSize(size);
    }

    /// Cast the given pointer to an integer pointer suitable for
    /// atomic operations.
    llvm::Value *emitCastToAtomicIntPointer(llvm::Value *addr) const;

    /// Turn an atomic-layout object into an r-value.
    RValue convertTempToRValue(llvm::Value *addr,
                               AggValueSlot resultSlot,
                               SourceLocation loc) const;

    /// Copy an atomic r-value into atomic-layout memory.
    void emitCopyIntoMemory(RValue rvalue, LValue lvalue) const;

    /// Project an l-value down to the value field.
    LValue projectValue(LValue lvalue) const {
      llvm::Value *addr = lvalue.getAddress();
      if (hasPadding())
        addr = CGF.Builder.CreateStructGEP(addr, 0);

      return LValue::MakeAddr(addr, getValueType(), lvalue.getAlignment(),
                              CGF.getContext(), lvalue.getTBAAInfo());
    }

    /// Materialize an atomic r-value in atomic-layout memory.
    llvm::Value *materializeRValue(RValue rvalue) const;

  private:
    bool requiresMemSetZero(llvm::Type *type) const;
  };
}

static RValue emitAtomicLibcall(CodeGenFunction &CGF,
                                StringRef fnName,
                                QualType resultType,
                                CallArgList &args) {
  const CGFunctionInfo &fnInfo =
    CGF.CGM.getTypes().arrangeFreeFunctionCall(resultType, args,
            FunctionType::ExtInfo(), RequiredArgs::All);
  llvm::FunctionType *fnTy = CGF.CGM.getTypes().GetFunctionType(fnInfo);
  llvm::Constant *fn = CGF.CGM.CreateRuntimeFunction(fnTy, fnName);
  return CGF.EmitCall(fnInfo, fn, ReturnValueSlot(), args);
}

/// Does a store of the given IR type modify the full expected width?
static bool isFullSizeType(CodeGenModule &CGM, llvm::Type *type,
                           uint64_t expectedSize) {
  return (CGM.getDataLayout().getTypeStoreSize(type) * 8 == expectedSize);
}
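// Illustrative example (assuming a typical x86-64 data layout): an x86_fp80
// has a store size of 80 bits but sits in a 128-bit atomic object, so it is
// not "full size" and the surrounding padding bytes must be zeroed first.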

/// Does the atomic type require memsetting to zero before initialization?
///
/// The IR type is provided as a way of making certain queries faster.
bool AtomicInfo::requiresMemSetZero(llvm::Type *type) const {
  // If the atomic type has size padding, we definitely need a memset.
  if (hasPadding()) return true;

  // Otherwise, do some simple heuristics to try to avoid it:
  switch (getEvaluationKind()) {
  // For scalars and complexes, check whether the store size of the
  // type uses the full size.
  case TEK_Scalar:
    return !isFullSizeType(CGF.CGM, type, AtomicSizeInBits);
  case TEK_Complex:
    return !isFullSizeType(CGF.CGM, type->getStructElementType(0),
                           AtomicSizeInBits / 2);

  // Padding in structs has an undefined bit pattern. User beware.
  case TEK_Aggregate:
    return false;
  }
  llvm_unreachable("bad evaluation kind");
}

bool AtomicInfo::emitMemSetZeroIfNecessary(LValue dest) const {
  llvm::Value *addr = dest.getAddress();
  if (!requiresMemSetZero(addr->getType()->getPointerElementType()))
    return false;

  CGF.Builder.CreateMemSet(addr, llvm::ConstantInt::get(CGF.Int8Ty, 0),
                           AtomicSizeInBits / 8,
                           dest.getAlignment().getQuantity());
  return true;
}

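/// Emit a single native atomic operation -- a compare-and-exchange, an atomic
/// load or store, or an atomicrmw (with an optional post-operation for the
/// *_fetch forms) -- for the given expression at the given ordering, writing
/// the result, if any, to Dest.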
static void
EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, llvm::Value *Dest,
             llvm::Value *Ptr, llvm::Value *Val1, llvm::Value *Val2,
             uint64_t Size, unsigned Align, llvm::AtomicOrdering Order) {
  llvm::AtomicRMWInst::BinOp Op = llvm::AtomicRMWInst::Add;
  llvm::Instruction::BinaryOps PostOp = (llvm::Instruction::BinaryOps)0;

  switch (E->getOp()) {
  case AtomicExpr::AO__c11_atomic_init:
    llvm_unreachable("Already handled!");

  case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
  case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
  case AtomicExpr::AO__atomic_compare_exchange:
  case AtomicExpr::AO__atomic_compare_exchange_n: {
    // Note that cmpxchg only supports specifying one ordering and
    // doesn't support weak cmpxchg, at least at the moment.
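    //
    // The control flow emitted below is roughly (illustrative only):
    //
    //   %expected = load %Val1
    //   %desired  = load %Val2
    //   %old = cmpxchg %Ptr, %expected, %desired <order>
    //   %cmp = icmp eq %old, %expected
    //   br %cmp, label %cmpxchg.continue, label %cmpxchg.store_expected
    // cmpxchg.store_expected:            ; failure: report the observed value
    //   store %old, %Val1
    //   br label %cmpxchg.continue
    // cmpxchg.continue:
    //   store %cmp, %Dest                ; the boolean result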

    llvm::LoadInst *Expected = CGF.Builder.CreateLoad(Val1);
    Expected->setAlignment(Align);
    llvm::LoadInst *Desired = CGF.Builder.CreateLoad(Val2);
    Desired->setAlignment(Align);
    llvm::AtomicCmpXchgInst *Old =
        CGF.Builder.CreateAtomicCmpXchg(Ptr, Expected, Desired, Order);
    Old->setVolatile(E->isVolatile());

    // Cmp holds the result of the compare-exchange operation: true on success,
    // false on failure.
    llvm::Value *Cmp = CGF.Builder.CreateICmpEQ(Old, Expected);

    // This basic block is used to hold the store instruction if the operation
    // failed.
    llvm::BasicBlock *StoreExpectedBB =
        CGF.createBasicBlock("cmpxchg.store_expected", CGF.CurFn);

    // This basic block is the exit point of the operation, we should end up
    // here regardless of whether or not the operation succeeded.
    llvm::BasicBlock *ContinueBB =
        CGF.createBasicBlock("cmpxchg.continue", CGF.CurFn);

    // Update Expected if Expected isn't equal to Old, otherwise branch to the
    // exit point.
    CGF.Builder.CreateCondBr(Cmp, ContinueBB, StoreExpectedBB);

    CGF.Builder.SetInsertPoint(StoreExpectedBB);
    // Update the memory at Expected with Old's value.
    llvm::StoreInst *StoreExpected = CGF.Builder.CreateStore(Old, Val1);
    StoreExpected->setAlignment(Align);
    // Finally, branch to the exit point.
    CGF.Builder.CreateBr(ContinueBB);

    CGF.Builder.SetInsertPoint(ContinueBB);
    // Update the memory at Dest with Cmp's value.
    CGF.EmitStoreOfScalar(Cmp, CGF.MakeAddrLValue(Dest, E->getType()));
    return;
  }

  case AtomicExpr::AO__c11_atomic_load:
  case AtomicExpr::AO__atomic_load_n:
  case AtomicExpr::AO__atomic_load: {
    llvm::LoadInst *Load = CGF.Builder.CreateLoad(Ptr);
    Load->setAtomic(Order);
    Load->setAlignment(Size);
    Load->setVolatile(E->isVolatile());
    llvm::StoreInst *StoreDest = CGF.Builder.CreateStore(Load, Dest);
    StoreDest->setAlignment(Align);
    return;
  }

  case AtomicExpr::AO__c11_atomic_store:
  case AtomicExpr::AO__atomic_store:
  case AtomicExpr::AO__atomic_store_n: {
    assert(!Dest && "Store does not return a value");
    llvm::LoadInst *LoadVal1 = CGF.Builder.CreateLoad(Val1);
    LoadVal1->setAlignment(Align);
    llvm::StoreInst *Store = CGF.Builder.CreateStore(LoadVal1, Ptr);
    Store->setAtomic(Order);
    Store->setAlignment(Size);
    Store->setVolatile(E->isVolatile());
    return;
  }

  case AtomicExpr::AO__c11_atomic_exchange:
  case AtomicExpr::AO__atomic_exchange_n:
  case AtomicExpr::AO__atomic_exchange:
    Op = llvm::AtomicRMWInst::Xchg;
    break;

  case AtomicExpr::AO__atomic_add_fetch:
    PostOp = llvm::Instruction::Add;
    // Fall through.
  case AtomicExpr::AO__c11_atomic_fetch_add:
  case AtomicExpr::AO__atomic_fetch_add:
    Op = llvm::AtomicRMWInst::Add;
    break;

  case AtomicExpr::AO__atomic_sub_fetch:
    PostOp = llvm::Instruction::Sub;
    // Fall through.
  case AtomicExpr::AO__c11_atomic_fetch_sub:
  case AtomicExpr::AO__atomic_fetch_sub:
    Op = llvm::AtomicRMWInst::Sub;
    break;

  case AtomicExpr::AO__atomic_and_fetch:
    PostOp = llvm::Instruction::And;
    // Fall through.
  case AtomicExpr::AO__c11_atomic_fetch_and:
  case AtomicExpr::AO__atomic_fetch_and:
    Op = llvm::AtomicRMWInst::And;
    break;

  case AtomicExpr::AO__atomic_or_fetch:
    PostOp = llvm::Instruction::Or;
    // Fall through.
  case AtomicExpr::AO__c11_atomic_fetch_or:
  case AtomicExpr::AO__atomic_fetch_or:
    Op = llvm::AtomicRMWInst::Or;
    break;

  case AtomicExpr::AO__atomic_xor_fetch:
    PostOp = llvm::Instruction::Xor;
    // Fall through.
  case AtomicExpr::AO__c11_atomic_fetch_xor:
  case AtomicExpr::AO__atomic_fetch_xor:
    Op = llvm::AtomicRMWInst::Xor;
    break;

  case AtomicExpr::AO__atomic_nand_fetch:
    PostOp = llvm::Instruction::And;
    // Fall through.
  case AtomicExpr::AO__atomic_fetch_nand:
    Op = llvm::AtomicRMWInst::Nand;
    break;
  }

  llvm::LoadInst *LoadVal1 = CGF.Builder.CreateLoad(Val1);
  LoadVal1->setAlignment(Align);
  llvm::AtomicRMWInst *RMWI =
      CGF.Builder.CreateAtomicRMW(Op, Ptr, LoadVal1, Order);
  RMWI->setVolatile(E->isVolatile());

  // For __atomic_*_fetch operations, perform the operation again to
  // determine the value which was written.
  llvm::Value *Result = RMWI;
  if (PostOp)
    Result = CGF.Builder.CreateBinOp(PostOp, RMWI, LoadVal1);
  if (E->getOp() == AtomicExpr::AO__atomic_nand_fetch)
    Result = CGF.Builder.CreateNot(Result);
  llvm::StoreInst *StoreDest = CGF.Builder.CreateStore(Result, Dest);
  StoreDest->setAlignment(Align);
}

// This function emits any expression (scalar, complex, or aggregate)
// into a temporary alloca.
static llvm::Value *
EmitValToTemp(CodeGenFunction &CGF, Expr *E) {
  llvm::Value *DeclPtr = CGF.CreateMemTemp(E->getType(), ".atomictmp");
  CGF.EmitAnyExprToMem(E, DeclPtr, E->getType().getQualifiers(),
                       /*Init*/ true);
  return DeclPtr;
}

static void
AddDirectArgument(CodeGenFunction &CGF, CallArgList &Args,
                  bool UseOptimizedLibcall, llvm::Value *Val, QualType ValTy,
                  SourceLocation Loc) {
  if (UseOptimizedLibcall) {
    // Load value and pass it to the function directly.
    unsigned Align = CGF.getContext().getTypeAlignInChars(ValTy).getQuantity();
    Val = CGF.EmitLoadOfScalar(Val, false, Align, ValTy, Loc);
    Args.add(RValue::get(Val), ValTy);
  } else {
    // Non-optimized functions always take a reference.
    Args.add(RValue::get(CGF.EmitCastToVoidPtr(Val)),
             CGF.getContext().VoidPtrTy);
  }
}
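// Illustrative example: an optimized call such as
// __atomic_fetch_add_4(p, v, order) receives the value 'v' directly, whereas
// the generic __atomic_exchange(size, p, &v, &ret, order) form receives a
// pointer to it (see the libcall signatures listed in EmitAtomicExpr below).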
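/// Emit an atomic expression (a __c11_atomic_* or __atomic_* builtin), either
/// with native instructions or as a call into the atomic support library, and
/// return its result, if any.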
RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) {
  QualType AtomicTy = E->getPtr()->getType()->getPointeeType();
  QualType MemTy = AtomicTy;
  if (const AtomicType *AT = AtomicTy->getAs<AtomicType>())
    MemTy = AT->getValueType();
  CharUnits sizeChars = getContext().getTypeSizeInChars(AtomicTy);
  uint64_t Size = sizeChars.getQuantity();
  CharUnits alignChars = getContext().getTypeAlignInChars(AtomicTy);
  unsigned Align = alignChars.getQuantity();
  unsigned MaxInlineWidthInBits =
    getTarget().getMaxAtomicInlineWidth();
  bool UseLibcall = (Size != Align ||
                     getContext().toBits(sizeChars) > MaxInlineWidthInBits);

  llvm::Value *Ptr, *Order, *OrderFail = 0, *Val1 = 0, *Val2 = 0;
  Ptr = EmitScalarExpr(E->getPtr());

  if (E->getOp() == AtomicExpr::AO__c11_atomic_init) {
    assert(!Dest && "Init does not return a value");
    LValue lvalue = LValue::MakeAddr(Ptr, AtomicTy, alignChars, getContext());
    EmitAtomicInit(E->getVal1(), lvalue);
    return RValue::get(0);
  }

  Order = EmitScalarExpr(E->getOrder());

  switch (E->getOp()) {
  case AtomicExpr::AO__c11_atomic_init:
    llvm_unreachable("Already handled!");

  case AtomicExpr::AO__c11_atomic_load:
  case AtomicExpr::AO__atomic_load_n:
    break;

  case AtomicExpr::AO__atomic_load:
    Dest = EmitScalarExpr(E->getVal1());
    break;

  case AtomicExpr::AO__atomic_store:
    Val1 = EmitScalarExpr(E->getVal1());
    break;

  case AtomicExpr::AO__atomic_exchange:
    Val1 = EmitScalarExpr(E->getVal1());
    Dest = EmitScalarExpr(E->getVal2());
    break;

  case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
  case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
  case AtomicExpr::AO__atomic_compare_exchange_n:
  case AtomicExpr::AO__atomic_compare_exchange:
    Val1 = EmitScalarExpr(E->getVal1());
    if (E->getOp() == AtomicExpr::AO__atomic_compare_exchange)
      Val2 = EmitScalarExpr(E->getVal2());
    else
      Val2 = EmitValToTemp(*this, E->getVal2());
    OrderFail = EmitScalarExpr(E->getOrderFail());
    // Evaluate and discard the 'weak' argument.
    if (E->getNumSubExprs() == 6)
      EmitScalarExpr(E->getWeak());
    break;

  case AtomicExpr::AO__c11_atomic_fetch_add:
  case AtomicExpr::AO__c11_atomic_fetch_sub:
    if (MemTy->isPointerType()) {
      // For pointer arithmetic, we're required to do a bit of math:
      // adding 1 to an int* is not the same as adding 1 to a uintptr_t.
      // ... but only for the C11 builtins. The GNU builtins expect the
      // user to multiply by sizeof(T).
      QualType Val1Ty = E->getVal1()->getType();
      llvm::Value *Val1Scalar = EmitScalarExpr(E->getVal1());
      CharUnits PointeeIncAmt =
          getContext().getTypeSizeInChars(MemTy->getPointeeType());
      Val1Scalar = Builder.CreateMul(Val1Scalar, CGM.getSize(PointeeIncAmt));
      Val1 = CreateMemTemp(Val1Ty, ".atomictmp");
      EmitStoreOfScalar(Val1Scalar, MakeAddrLValue(Val1, Val1Ty));
      break;
    }
    // Fall through.
  case AtomicExpr::AO__atomic_fetch_add:
  case AtomicExpr::AO__atomic_fetch_sub:
  case AtomicExpr::AO__atomic_add_fetch:
  case AtomicExpr::AO__atomic_sub_fetch:
  case AtomicExpr::AO__c11_atomic_store:
  case AtomicExpr::AO__c11_atomic_exchange:
  case AtomicExpr::AO__atomic_store_n:
  case AtomicExpr::AO__atomic_exchange_n:
  case AtomicExpr::AO__c11_atomic_fetch_and:
  case AtomicExpr::AO__c11_atomic_fetch_or:
  case AtomicExpr::AO__c11_atomic_fetch_xor:
  case AtomicExpr::AO__atomic_fetch_and:
  case AtomicExpr::AO__atomic_fetch_or:
  case AtomicExpr::AO__atomic_fetch_xor:
  case AtomicExpr::AO__atomic_fetch_nand:
  case AtomicExpr::AO__atomic_and_fetch:
  case AtomicExpr::AO__atomic_or_fetch:
  case AtomicExpr::AO__atomic_xor_fetch:
  case AtomicExpr::AO__atomic_nand_fetch:
    Val1 = EmitValToTemp(*this, E->getVal1());
    break;
  }

  if (!E->getType()->isVoidType() && !Dest)
    Dest = CreateMemTemp(E->getType(), ".atomicdst");

  // Use a library call. See: http://gcc.gnu.org/wiki/Atomic/GCCMM/LIbrary .
  if (UseLibcall) {
    bool UseOptimizedLibcall = false;
    switch (E->getOp()) {
    case AtomicExpr::AO__c11_atomic_fetch_add:
    case AtomicExpr::AO__atomic_fetch_add:
    case AtomicExpr::AO__c11_atomic_fetch_and:
    case AtomicExpr::AO__atomic_fetch_and:
    case AtomicExpr::AO__c11_atomic_fetch_or:
    case AtomicExpr::AO__atomic_fetch_or:
    case AtomicExpr::AO__c11_atomic_fetch_sub:
    case AtomicExpr::AO__atomic_fetch_sub:
    case AtomicExpr::AO__c11_atomic_fetch_xor:
    case AtomicExpr::AO__atomic_fetch_xor:
      // For these, only library calls for certain sizes exist.
      UseOptimizedLibcall = true;
      break;
    default:
      // Only use optimized library calls for sizes for which they exist.
      if (Size == 1 || Size == 2 || Size == 4 || Size == 8)
        UseOptimizedLibcall = true;
      break;
    }

    CallArgList Args;
    if (!UseOptimizedLibcall) {
      // For non-optimized library calls, the size is the first parameter.
      Args.add(RValue::get(llvm::ConstantInt::get(SizeTy, Size)),
               getContext().getSizeType());
    }
    // Atomic address is the first or second parameter.
    Args.add(RValue::get(EmitCastToVoidPtr(Ptr)), getContext().VoidPtrTy);

    std::string LibCallName;
    QualType RetTy;
    bool HaveRetTy = false;
    switch (E->getOp()) {
    // There is only one libcall for compare and exchange, because there is no
    // optimisation benefit possible from a libcall version of a weak compare
    // and exchange.
    // bool __atomic_compare_exchange(size_t size, void *mem, void *expected,
    //                                void *desired, int success, int failure)
    // bool __atomic_compare_exchange_N(T *mem, T *expected, T desired,
    //                                  int success, int failure)
    case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
    case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
    case AtomicExpr::AO__atomic_compare_exchange:
    case AtomicExpr::AO__atomic_compare_exchange_n:
      LibCallName = "__atomic_compare_exchange";
      RetTy = getContext().BoolTy;
      HaveRetTy = true;
      Args.add(RValue::get(EmitCastToVoidPtr(Val1)), getContext().VoidPtrTy);
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val2, MemTy,
                        E->getExprLoc());
      Args.add(RValue::get(Order), getContext().IntTy);
      Order = OrderFail;
      break;
    // void __atomic_exchange(size_t size, void *mem, void *val, void *return,
    //                        int order)
    // T __atomic_exchange_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_exchange:
    case AtomicExpr::AO__atomic_exchange_n:
    case AtomicExpr::AO__atomic_exchange:
      LibCallName = "__atomic_exchange";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
                        E->getExprLoc());
      break;
    // void __atomic_store(size_t size, void *mem, void *val, int order)
    // void __atomic_store_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_store:
    case AtomicExpr::AO__atomic_store:
    case AtomicExpr::AO__atomic_store_n:
      LibCallName = "__atomic_store";
      RetTy = getContext().VoidTy;
      HaveRetTy = true;
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
                        E->getExprLoc());
      break;
    // void __atomic_load(size_t size, void *mem, void *return, int order)
    // T __atomic_load_N(T *mem, int order)
    case AtomicExpr::AO__c11_atomic_load:
    case AtomicExpr::AO__atomic_load:
    case AtomicExpr::AO__atomic_load_n:
      LibCallName = "__atomic_load";
      break;
    // T __atomic_fetch_add_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_fetch_add:
    case AtomicExpr::AO__atomic_fetch_add:
      LibCallName = "__atomic_fetch_add";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
                        E->getExprLoc());
      break;
    // T __atomic_fetch_and_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_fetch_and:
    case AtomicExpr::AO__atomic_fetch_and:
      LibCallName = "__atomic_fetch_and";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
                        E->getExprLoc());
      break;
    // T __atomic_fetch_or_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_fetch_or:
    case AtomicExpr::AO__atomic_fetch_or:
      LibCallName = "__atomic_fetch_or";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
                        E->getExprLoc());
      break;
    // T __atomic_fetch_sub_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_fetch_sub:
    case AtomicExpr::AO__atomic_fetch_sub:
      LibCallName = "__atomic_fetch_sub";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
                        E->getExprLoc());
      break;
    // T __atomic_fetch_xor_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_fetch_xor:
    case AtomicExpr::AO__atomic_fetch_xor:
      LibCallName = "__atomic_fetch_xor";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
                        E->getExprLoc());
      break;
    default: return EmitUnsupportedRValue(E, "atomic library call");
    }

    // Optimized functions have the size in their name.
    if (UseOptimizedLibcall)
      LibCallName += "_" + llvm::utostr(Size);
    // By default, assume we return a value of the atomic type.
    if (!HaveRetTy) {
      if (UseOptimizedLibcall) {
        // Value is returned directly.
        RetTy = MemTy;
      } else {
        // Value is returned through parameter before the order.
        RetTy = getContext().VoidTy;
        Args.add(RValue::get(EmitCastToVoidPtr(Dest)),
                 getContext().VoidPtrTy);
      }
    }
    // The order is always the last parameter.
    Args.add(RValue::get(Order),
             getContext().IntTy);

    const CGFunctionInfo &FuncInfo =
        CGM.getTypes().arrangeFreeFunctionCall(RetTy, Args,
            FunctionType::ExtInfo(), RequiredArgs::All);
    llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FuncInfo);
    llvm::Constant *Func = CGM.CreateRuntimeFunction(FTy, LibCallName);
    RValue Res = EmitCall(FuncInfo, Func, ReturnValueSlot(), Args);
    if (!RetTy->isVoidType())
      return Res;
    if (E->getType()->isVoidType())
      return RValue::get(0);
    return convertTempToRValue(Dest, E->getType(), E->getExprLoc());
  }

  bool IsStore = E->getOp() == AtomicExpr::AO__c11_atomic_store ||
                 E->getOp() == AtomicExpr::AO__atomic_store ||
                 E->getOp() == AtomicExpr::AO__atomic_store_n;
  bool IsLoad = E->getOp() == AtomicExpr::AO__c11_atomic_load ||
                E->getOp() == AtomicExpr::AO__atomic_load ||
                E->getOp() == AtomicExpr::AO__atomic_load_n;

  llvm::Type *IPtrTy =
      llvm::IntegerType::get(getLLVMContext(), Size * 8)->getPointerTo();
  llvm::Value *OrigDest = Dest;
  Ptr = Builder.CreateBitCast(Ptr, IPtrTy);
  if (Val1) Val1 = Builder.CreateBitCast(Val1, IPtrTy);
  if (Val2) Val2 = Builder.CreateBitCast(Val2, IPtrTy);
  if (Dest && !E->isCmpXChg()) Dest = Builder.CreateBitCast(Dest, IPtrTy);

  if (isa<llvm::ConstantInt>(Order)) {
    int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
    switch (ord) {
    case AO_ABI_memory_order_relaxed:
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
                   llvm::Monotonic);
      break;
    case AO_ABI_memory_order_consume:
    case AO_ABI_memory_order_acquire:
      if (IsStore)
        break; // Avoid crashing on code with undefined behavior
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
                   llvm::Acquire);
      break;
    case AO_ABI_memory_order_release:
      if (IsLoad)
        break; // Avoid crashing on code with undefined behavior
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
                   llvm::Release);
      break;
    case AO_ABI_memory_order_acq_rel:
      if (IsLoad || IsStore)
        break; // Avoid crashing on code with undefined behavior
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
                   llvm::AcquireRelease);
      break;
    case AO_ABI_memory_order_seq_cst:
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
                   llvm::SequentiallyConsistent);
      break;
    default: // invalid order
      // We should not ever get here normally, but it's hard to
      // enforce that in general.
      break;
    }
    if (E->getType()->isVoidType())
      return RValue::get(0);
    return convertTempToRValue(OrigDest, E->getType(), E->getExprLoc());
  }

  // Long case, when Order isn't obviously constant.

  // Create all the relevant BB's
  llvm::BasicBlock *MonotonicBB = 0, *AcquireBB = 0, *ReleaseBB = 0,
                   *AcqRelBB = 0, *SeqCstBB = 0;
  MonotonicBB = createBasicBlock("monotonic", CurFn);
  if (!IsStore)
    AcquireBB = createBasicBlock("acquire", CurFn);
  if (!IsLoad)
    ReleaseBB = createBasicBlock("release", CurFn);
  if (!IsLoad && !IsStore)
    AcqRelBB = createBasicBlock("acqrel", CurFn);
  SeqCstBB = createBasicBlock("seqcst", CurFn);
  llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);

  // Create the switch for the split
  // MonotonicBB is arbitrarily chosen as the default case; in practice, this
  // doesn't matter unless someone is crazy enough to use something that
  // doesn't fold to a constant for the ordering.
  Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
  llvm::SwitchInst *SI = Builder.CreateSwitch(Order, MonotonicBB);
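  // The case values added below are the AO_ABI_memory_order_* values defined
  // at the top of this file: 1 (consume) and 2 (acquire) share the acquire
  // block, 3 is release, 4 is acq_rel, and 5 is seq_cst; anything else falls
  // back to the monotonic default.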

  // Emit all the different atomics
  Builder.SetInsertPoint(MonotonicBB);
  EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
               llvm::Monotonic);
  Builder.CreateBr(ContBB);
  if (!IsStore) {
    Builder.SetInsertPoint(AcquireBB);
    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
                 llvm::Acquire);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32(1), AcquireBB);
    SI->addCase(Builder.getInt32(2), AcquireBB);
  }
  if (!IsLoad) {
    Builder.SetInsertPoint(ReleaseBB);
    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
                 llvm::Release);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32(3), ReleaseBB);
  }
  if (!IsLoad && !IsStore) {
    Builder.SetInsertPoint(AcqRelBB);
    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
                 llvm::AcquireRelease);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32(4), AcqRelBB);
  }
  Builder.SetInsertPoint(SeqCstBB);
  EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
               llvm::SequentiallyConsistent);
  Builder.CreateBr(ContBB);
  SI->addCase(Builder.getInt32(5), SeqCstBB);

  // Cleanup and return
  Builder.SetInsertPoint(ContBB);
  if (E->getType()->isVoidType())
    return RValue::get(0);
  return convertTempToRValue(OrigDest, E->getType(), E->getExprLoc());
}

llvm::Value *AtomicInfo::emitCastToAtomicIntPointer(llvm::Value *addr) const {
  unsigned addrspace =
    cast<llvm::PointerType>(addr->getType())->getAddressSpace();
  llvm::IntegerType *ty =
    llvm::IntegerType::get(CGF.getLLVMContext(), AtomicSizeInBits);
  return CGF.Builder.CreateBitCast(addr, ty->getPointerTo(addrspace));
}

RValue AtomicInfo::convertTempToRValue(llvm::Value *addr,
                                       AggValueSlot resultSlot,
                                       SourceLocation loc) const {
  if (EvaluationKind == TEK_Aggregate)
    return resultSlot.asRValue();

  // Drill into the padding structure if we have one.
  if (hasPadding())
    addr = CGF.Builder.CreateStructGEP(addr, 0);

  // Otherwise, just convert the temporary to an r-value using the
  // normal conversion routine.
  return CGF.convertTempToRValue(addr, getValueType(), loc);
}

/// Emit a load from an l-value of atomic type. Note that the r-value
/// we produce is an r-value of the atomic *value* type.
RValue CodeGenFunction::EmitAtomicLoad(LValue src, SourceLocation loc,
                                       AggValueSlot resultSlot) {
  AtomicInfo atomics(*this, src);

  // Check whether we should use a library call.
  if (atomics.shouldUseLibcall()) {
    llvm::Value *tempAddr;
    if (!resultSlot.isIgnored()) {
      assert(atomics.getEvaluationKind() == TEK_Aggregate);
      tempAddr = resultSlot.getAddr();
    } else {
      tempAddr = CreateMemTemp(atomics.getAtomicType(), "atomic-load-temp");
    }

    // void __atomic_load(size_t size, void *mem, void *return, int order);
    CallArgList args;
    args.add(RValue::get(atomics.getAtomicSizeValue()),
             getContext().getSizeType());
    args.add(RValue::get(EmitCastToVoidPtr(src.getAddress())),
             getContext().VoidPtrTy);
    args.add(RValue::get(EmitCastToVoidPtr(tempAddr)),
             getContext().VoidPtrTy);
    args.add(RValue::get(llvm::ConstantInt::get(IntTy,
                                                AO_ABI_memory_order_seq_cst)),
             getContext().IntTy);
    emitAtomicLibcall(*this, "__atomic_load", getContext().VoidTy, args);

    // Produce the r-value.
    return atomics.convertTempToRValue(tempAddr, resultSlot, loc);
  }

  // Okay, we're doing this natively.
  llvm::Value *addr = atomics.emitCastToAtomicIntPointer(src.getAddress());
  llvm::LoadInst *load = Builder.CreateLoad(addr, "atomic-load");
  load->setAtomic(llvm::SequentiallyConsistent);

  // Other decoration.
  load->setAlignment(src.getAlignment().getQuantity());
  if (src.isVolatileQualified())
    load->setVolatile(true);
  if (src.getTBAAInfo())
    CGM.DecorateInstruction(load, src.getTBAAInfo());

  // Okay, turn that back into the original value type.
  QualType valueType = atomics.getValueType();
  llvm::Value *result = load;

  // If we're ignoring an aggregate return, don't do anything.
  if (atomics.getEvaluationKind() == TEK_Aggregate && resultSlot.isIgnored())
    return RValue::getAggregate(0, false);

  // The easiest way to do this is to go through memory, but we
  // try not to in some easy cases.
  if (atomics.getEvaluationKind() == TEK_Scalar && !atomics.hasPadding()) {
    llvm::Type *resultTy = CGM.getTypes().ConvertTypeForMem(valueType);
    if (isa<llvm::IntegerType>(resultTy)) {
      assert(result->getType() == resultTy);
      result = EmitFromMemory(result, valueType);
    } else if (isa<llvm::PointerType>(resultTy)) {
      result = Builder.CreateIntToPtr(result, resultTy);
    } else {
      result = Builder.CreateBitCast(result, resultTy);
    }
    return RValue::get(result);
  }

  // Create a temporary.  This needs to be big enough to hold the
  // atomic integer.
  llvm::Value *temp;
  bool tempIsVolatile = false;
  CharUnits tempAlignment;
  if (atomics.getEvaluationKind() == TEK_Aggregate) {
    assert(!resultSlot.isIgnored());
    temp = resultSlot.getAddr();
    tempAlignment = atomics.getValueAlignment();
    tempIsVolatile = resultSlot.isVolatile();
  } else {
    temp = CreateMemTemp(atomics.getAtomicType(), "atomic-load-temp");
    tempAlignment = atomics.getAtomicAlignment();
  }

  // Slam the integer into the temporary.
  llvm::Value *castTemp = atomics.emitCastToAtomicIntPointer(temp);
  Builder.CreateAlignedStore(result, castTemp, tempAlignment.getQuantity())
    ->setVolatile(tempIsVolatile);

  return atomics.convertTempToRValue(temp, resultSlot, loc);
}



/// Copy an r-value into memory as part of storing to an atomic type.
/// This needs to create a bit-pattern suitable for atomic operations.
void AtomicInfo::emitCopyIntoMemory(RValue rvalue, LValue dest) const {
  // If we have an r-value, the rvalue should be of the atomic type,
  // which means that the caller is responsible for having zeroed
  // any padding.  Just do an aggregate copy of that type.
  if (rvalue.isAggregate()) {
    CGF.EmitAggregateCopy(dest.getAddress(),
                          rvalue.getAggregateAddr(),
                          getAtomicType(),
                          (rvalue.isVolatileQualified()
                           || dest.isVolatileQualified()),
                          dest.getAlignment());
    return;
  }

  // Okay, otherwise we're copying stuff.

  // Zero out the buffer if necessary.
  emitMemSetZeroIfNecessary(dest);

  // Drill past the padding if present.
  dest = projectValue(dest);

  // Okay, store the rvalue in.
  if (rvalue.isScalar()) {
    CGF.EmitStoreOfScalar(rvalue.getScalarVal(), dest, /*init*/ true);
  } else {
    CGF.EmitStoreOfComplex(rvalue.getComplexVal(), dest, /*init*/ true);
  }
}


/// Materialize an r-value into memory for the purposes of storing it
/// to an atomic type.
llvm::Value *AtomicInfo::materializeRValue(RValue rvalue) const {
  // Aggregate r-values are already in memory, and EmitAtomicStore
  // requires them to be values of the atomic type.
  if (rvalue.isAggregate())
    return rvalue.getAggregateAddr();

  // Otherwise, make a temporary and materialize into it.
  llvm::Value *temp = CGF.CreateMemTemp(getAtomicType(), "atomic-store-temp");
  LValue tempLV = CGF.MakeAddrLValue(temp, getAtomicType(), getAtomicAlignment());
  emitCopyIntoMemory(rvalue, tempLV);
  return temp;
}

/// Emit a store to an l-value of atomic type.
///
/// Note that the r-value is expected to be an r-value *of the atomic
/// type*; this means that for aggregate r-values, it should include
/// storage for any padding that was necessary.
void CodeGenFunction::EmitAtomicStore(RValue rvalue, LValue dest, bool isInit) {
  // If this is an aggregate r-value, it should agree in type except
  // maybe for address-space qualification.
  assert(!rvalue.isAggregate() ||
         rvalue.getAggregateAddr()->getType()->getPointerElementType()
           == dest.getAddress()->getType()->getPointerElementType());

  AtomicInfo atomics(*this, dest);

  // If this is an initialization, just put the value there normally.
  if (isInit) {
    atomics.emitCopyIntoMemory(rvalue, dest);
    return;
  }

  // Check whether we should use a library call.
  if (atomics.shouldUseLibcall()) {
    // Produce a source address.
    llvm::Value *srcAddr = atomics.materializeRValue(rvalue);

    // void __atomic_store(size_t size, void *mem, void *val, int order)
    CallArgList args;
    args.add(RValue::get(atomics.getAtomicSizeValue()),
             getContext().getSizeType());
    args.add(RValue::get(EmitCastToVoidPtr(dest.getAddress())),
             getContext().VoidPtrTy);
    args.add(RValue::get(EmitCastToVoidPtr(srcAddr)),
             getContext().VoidPtrTy);
    args.add(RValue::get(llvm::ConstantInt::get(IntTy,
                                                AO_ABI_memory_order_seq_cst)),
             getContext().IntTy);
    emitAtomicLibcall(*this, "__atomic_store", getContext().VoidTy, args);
    return;
  }

  // Okay, we're doing this natively.
  llvm::Value *intValue;

  // If we've got a scalar value of the right size, try to avoid going
  // through memory.
  if (rvalue.isScalar() && !atomics.hasPadding()) {
    llvm::Value *value = rvalue.getScalarVal();
    if (isa<llvm::IntegerType>(value->getType())) {
      intValue = value;
    } else {
      llvm::IntegerType *inputIntTy =
        llvm::IntegerType::get(getLLVMContext(), atomics.getValueSizeInBits());
      if (isa<llvm::PointerType>(value->getType())) {
        intValue = Builder.CreatePtrToInt(value, inputIntTy);
      } else {
        intValue = Builder.CreateBitCast(value, inputIntTy);
      }
    }

  // Otherwise, we need to go through memory.
  } else {
    // Put the r-value in memory.
    llvm::Value *addr = atomics.materializeRValue(rvalue);

    // Cast the temporary to the atomic int type and pull a value out.
    addr = atomics.emitCastToAtomicIntPointer(addr);
    intValue = Builder.CreateAlignedLoad(addr,
                                 atomics.getAtomicAlignment().getQuantity());
  }

  // Do the atomic store.
  llvm::Value *addr = atomics.emitCastToAtomicIntPointer(dest.getAddress());
  llvm::StoreInst *store = Builder.CreateStore(intValue, addr);

  // Initializations don't need to be atomic.
  if (!isInit) store->setAtomic(llvm::SequentiallyConsistent);

  // Other decoration.
  store->setAlignment(dest.getAlignment().getQuantity());
  if (dest.isVolatileQualified())
    store->setVolatile(true);
  if (dest.getTBAAInfo())
    CGM.DecorateInstruction(store, dest.getTBAAInfo());
}

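/// Emit the initialization of an object of atomic type from 'init'.
/// Initialization is not required to be atomic, so this simply copies the
/// value into place, zeroing any padding where necessary.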
void CodeGenFunction::EmitAtomicInit(Expr *init, LValue dest) {
  AtomicInfo atomics(*this, dest);

  switch (atomics.getEvaluationKind()) {
  case TEK_Scalar: {
    llvm::Value *value = EmitScalarExpr(init);
    atomics.emitCopyIntoMemory(RValue::get(value), dest);
    return;
  }

  case TEK_Complex: {
    ComplexPairTy value = EmitComplexExpr(init);
    atomics.emitCopyIntoMemory(RValue::getComplex(value), dest);
    return;
  }

  case TEK_Aggregate: {
    // Fix up the destination if the initializer isn't an expression
    // of atomic type.
    bool Zeroed = false;
    if (!init->getType()->isAtomicType()) {
      Zeroed = atomics.emitMemSetZeroIfNecessary(dest);
      dest = atomics.projectValue(dest);
    }

    // Evaluate the expression directly into the destination.
    AggValueSlot slot = AggValueSlot::forLValue(dest,
                                        AggValueSlot::IsNotDestructed,
                                        AggValueSlot::DoesNotNeedGCBarriers,
                                        AggValueSlot::IsNotAliased,
                                        Zeroed ? AggValueSlot::IsZeroed :
                                                 AggValueSlot::IsNotZeroed);

    EmitAggExpr(init, slot);
    return;
  }
  }
  llvm_unreachable("bad evaluation kind");
}