//===--- CGAtomic.cpp - Emit LLVM IR for atomic operations ----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the code for emitting atomic operations.
//
//===----------------------------------------------------------------------===//

#include "CodeGenFunction.h"
#include "CGCall.h"
#include "CodeGenModule.h"
#include "clang/AST/ASTContext.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Operator.h"

using namespace clang;
using namespace CodeGen;

// The ABI values for various atomic memory orderings.
enum AtomicOrderingKind {
  AO_ABI_memory_order_relaxed = 0,
  AO_ABI_memory_order_consume = 1,
  AO_ABI_memory_order_acquire = 2,
  AO_ABI_memory_order_release = 3,
  AO_ABI_memory_order_acq_rel = 4,
  AO_ABI_memory_order_seq_cst = 5
};
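
// These values mirror the C11/C++11 memory_order enumerators, so a call
// written as __c11_atomic_load(p, memory_order_acquire) reaches the code
// below with the integer constant 2 as its ordering argument.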

namespace {
  class AtomicInfo {
    CodeGenFunction &CGF;
    QualType AtomicTy;
    QualType ValueTy;
    uint64_t AtomicSizeInBits;
    uint64_t ValueSizeInBits;
    CharUnits AtomicAlign;
    CharUnits ValueAlign;
    CharUnits LValueAlign;
    TypeEvaluationKind EvaluationKind;
    bool UseLibcall;
  public:
    AtomicInfo(CodeGenFunction &CGF, LValue &lvalue) : CGF(CGF) {
      assert(lvalue.isSimple());

      AtomicTy = lvalue.getType();
      ValueTy = AtomicTy->castAs<AtomicType>()->getValueType();
      EvaluationKind = CGF.getEvaluationKind(ValueTy);

      ASTContext &C = CGF.getContext();

      uint64_t valueAlignInBits;
      llvm::tie(ValueSizeInBits, valueAlignInBits) = C.getTypeInfo(ValueTy);

      uint64_t atomicAlignInBits;
      llvm::tie(AtomicSizeInBits, atomicAlignInBits) = C.getTypeInfo(AtomicTy);

      assert(ValueSizeInBits <= AtomicSizeInBits);
      assert(valueAlignInBits <= atomicAlignInBits);

      AtomicAlign = C.toCharUnitsFromBits(atomicAlignInBits);
      ValueAlign = C.toCharUnitsFromBits(valueAlignInBits);
      if (lvalue.getAlignment().isZero())
        lvalue.setAlignment(AtomicAlign);

      UseLibcall =
        (AtomicSizeInBits > uint64_t(C.toBits(lvalue.getAlignment())) ||
         AtomicSizeInBits > C.getTargetInfo().getMaxAtomicInlineWidth());
    }
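
    // For illustration: on a target whose getMaxAtomicInlineWidth() is 64,
    // a 16-byte _Atomic struct takes the libcall path below, while an
    // _Atomic(int) is lowered inline to native atomic instructions.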

    QualType getAtomicType() const { return AtomicTy; }
    QualType getValueType() const { return ValueTy; }
    CharUnits getAtomicAlignment() const { return AtomicAlign; }
    CharUnits getValueAlignment() const { return ValueAlign; }
    uint64_t getAtomicSizeInBits() const { return AtomicSizeInBits; }
    uint64_t getValueSizeInBits() const { return ValueSizeInBits; }
    TypeEvaluationKind getEvaluationKind() const { return EvaluationKind; }
    bool shouldUseLibcall() const { return UseLibcall; }

    /// Is the atomic size larger than the underlying value type?
    ///
    /// Note that the absence of padding does not mean that atomic
    /// objects are completely interchangeable with non-atomic
    /// objects: we might have promoted the alignment of a type
    /// without making it bigger.
    bool hasPadding() const {
      return (ValueSizeInBits != AtomicSizeInBits);
    }
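
    // For example, a three-byte struct wrapped in _Atomic is typically
    // widened to a four-byte atomic representation and so reports padding,
    // while _Atomic(int) usually matches its value size exactly even if
    // its alignment was promoted.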

    bool emitMemSetZeroIfNecessary(LValue dest) const;

    llvm::Value *getAtomicSizeValue() const {
      CharUnits size = CGF.getContext().toCharUnitsFromBits(AtomicSizeInBits);
      return CGF.CGM.getSize(size);
    }

    /// Cast the given pointer to an integer pointer suitable for
    /// atomic operations.
    llvm::Value *emitCastToAtomicIntPointer(llvm::Value *addr) const;

    /// Turn an atomic-layout object into an r-value.
    RValue convertTempToRValue(llvm::Value *addr,
                               AggValueSlot resultSlot,
                               SourceLocation loc) const;

    /// Copy an atomic r-value into atomic-layout memory.
    void emitCopyIntoMemory(RValue rvalue, LValue lvalue) const;

    /// Project an l-value down to the value field.
    LValue projectValue(LValue lvalue) const {
      llvm::Value *addr = lvalue.getAddress();
      if (hasPadding())
        addr = CGF.Builder.CreateStructGEP(addr, 0);

      return LValue::MakeAddr(addr, getValueType(), lvalue.getAlignment(),
                              CGF.getContext(), lvalue.getTBAAInfo());
    }

    /// Materialize an atomic r-value in atomic-layout memory.
    llvm::Value *materializeRValue(RValue rvalue) const;

  private:
    bool requiresMemSetZero(llvm::Type *type) const;
  };
}

static RValue emitAtomicLibcall(CodeGenFunction &CGF,
                                StringRef fnName,
                                QualType resultType,
                                CallArgList &args) {
  const CGFunctionInfo &fnInfo =
    CGF.CGM.getTypes().arrangeFreeFunctionCall(resultType, args,
        FunctionType::ExtInfo(), RequiredArgs::All);
  llvm::FunctionType *fnTy = CGF.CGM.getTypes().GetFunctionType(fnInfo);
  llvm::Constant *fn = CGF.CGM.CreateRuntimeFunction(fnTy, fnName);
  return CGF.EmitCall(fnInfo, fn, ReturnValueSlot(), args);
}

/// Does a store of the given IR type modify the full expected width?
static bool isFullSizeType(CodeGenModule &CGM, llvm::Type *type,
                           uint64_t expectedSize) {
  return (CGM.getDataLayout().getTypeStoreSize(type) * 8 == expectedSize);
}
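
// For example, a bool lowered to i8 has a one-byte store size and is "full
// size" within an 8-bit atomic; by contrast, on x86-64 an x86_fp80 value has
// a 10-byte store size and does not fill a 16-byte _Atomic long double, so
// the trailing bytes must still be zeroed.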

/// Does the atomic type require memsetting to zero before initialization?
///
/// The IR type is provided as a way of making certain queries faster.
bool AtomicInfo::requiresMemSetZero(llvm::Type *type) const {
  // If the atomic type has size padding, we definitely need a memset.
  if (hasPadding()) return true;

  // Otherwise, do some simple heuristics to try to avoid it:
  switch (getEvaluationKind()) {
  // For scalars and complexes, check whether the store size of the
  // type uses the full size.
  case TEK_Scalar:
    return !isFullSizeType(CGF.CGM, type, AtomicSizeInBits);
  case TEK_Complex:
    return !isFullSizeType(CGF.CGM, type->getStructElementType(0),
                           AtomicSizeInBits / 2);

  // Padding in structs has an undefined bit pattern.  User beware.
  case TEK_Aggregate:
    return false;
  }
  llvm_unreachable("bad evaluation kind");
}

bool AtomicInfo::emitMemSetZeroIfNecessary(LValue dest) const {
  llvm::Value *addr = dest.getAddress();
  if (!requiresMemSetZero(addr->getType()->getPointerElementType()))
    return false;

  CGF.Builder.CreateMemSet(addr, llvm::ConstantInt::get(CGF.Int8Ty, 0),
                           AtomicSizeInBits / 8,
                           dest.getAlignment().getQuantity());
  return true;
}

static void
EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, llvm::Value *Dest,
             llvm::Value *Ptr, llvm::Value *Val1, llvm::Value *Val2,
             uint64_t Size, unsigned Align, llvm::AtomicOrdering Order) {
  llvm::AtomicRMWInst::BinOp Op = llvm::AtomicRMWInst::Add;
  llvm::Instruction::BinaryOps PostOp = (llvm::Instruction::BinaryOps)0;

  switch (E->getOp()) {
  case AtomicExpr::AO__c11_atomic_init:
    llvm_unreachable("Already handled!");

  case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
  case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
  case AtomicExpr::AO__atomic_compare_exchange:
  case AtomicExpr::AO__atomic_compare_exchange_n: {
    // Note that cmpxchg only supports specifying one ordering and
    // doesn't support weak cmpxchg, at least at the moment.
    llvm::LoadInst *LoadVal1 = CGF.Builder.CreateLoad(Val1);
    LoadVal1->setAlignment(Align);
    llvm::LoadInst *LoadVal2 = CGF.Builder.CreateLoad(Val2);
    LoadVal2->setAlignment(Align);
    llvm::AtomicCmpXchgInst *CXI =
      CGF.Builder.CreateAtomicCmpXchg(Ptr, LoadVal1, LoadVal2, Order);
    CXI->setVolatile(E->isVolatile());
    llvm::StoreInst *StoreVal1 = CGF.Builder.CreateStore(CXI, Val1);
    StoreVal1->setAlignment(Align);
    llvm::Value *Cmp = CGF.Builder.CreateICmpEQ(CXI, LoadVal1);
    CGF.EmitStoreOfScalar(Cmp, CGF.MakeAddrLValue(Dest, E->getType()));
    return;
  }
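
  // A sketch of what the case above produces for a 4-byte type:
  //   %old = cmpxchg i32* %ptr, i32 %expected, i32 %desired <order>
  // The old value is stored back through Val1, so on failure the caller's
  // "expected" slot holds the value actually observed, as C11 requires, and
  // the boolean result is %old == %expected.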

  case AtomicExpr::AO__c11_atomic_load:
  case AtomicExpr::AO__atomic_load_n:
  case AtomicExpr::AO__atomic_load: {
    llvm::LoadInst *Load = CGF.Builder.CreateLoad(Ptr);
    Load->setAtomic(Order);
    Load->setAlignment(Size);
    Load->setVolatile(E->isVolatile());
    llvm::StoreInst *StoreDest = CGF.Builder.CreateStore(Load, Dest);
    StoreDest->setAlignment(Align);
    return;
  }

  case AtomicExpr::AO__c11_atomic_store:
  case AtomicExpr::AO__atomic_store:
  case AtomicExpr::AO__atomic_store_n: {
    assert(!Dest && "Store does not return a value");
    llvm::LoadInst *LoadVal1 = CGF.Builder.CreateLoad(Val1);
    LoadVal1->setAlignment(Align);
    llvm::StoreInst *Store = CGF.Builder.CreateStore(LoadVal1, Ptr);
    Store->setAtomic(Order);
    Store->setAlignment(Size);
    Store->setVolatile(E->isVolatile());
    return;
  }

  case AtomicExpr::AO__c11_atomic_exchange:
  case AtomicExpr::AO__atomic_exchange_n:
  case AtomicExpr::AO__atomic_exchange:
    Op = llvm::AtomicRMWInst::Xchg;
    break;

  case AtomicExpr::AO__atomic_add_fetch:
    PostOp = llvm::Instruction::Add;
    // Fall through.
  case AtomicExpr::AO__c11_atomic_fetch_add:
  case AtomicExpr::AO__atomic_fetch_add:
    Op = llvm::AtomicRMWInst::Add;
    break;

  case AtomicExpr::AO__atomic_sub_fetch:
    PostOp = llvm::Instruction::Sub;
    // Fall through.
  case AtomicExpr::AO__c11_atomic_fetch_sub:
  case AtomicExpr::AO__atomic_fetch_sub:
    Op = llvm::AtomicRMWInst::Sub;
    break;

  case AtomicExpr::AO__atomic_and_fetch:
    PostOp = llvm::Instruction::And;
    // Fall through.
  case AtomicExpr::AO__c11_atomic_fetch_and:
  case AtomicExpr::AO__atomic_fetch_and:
    Op = llvm::AtomicRMWInst::And;
    break;

  case AtomicExpr::AO__atomic_or_fetch:
    PostOp = llvm::Instruction::Or;
    // Fall through.
  case AtomicExpr::AO__c11_atomic_fetch_or:
  case AtomicExpr::AO__atomic_fetch_or:
    Op = llvm::AtomicRMWInst::Or;
    break;

  case AtomicExpr::AO__atomic_xor_fetch:
    PostOp = llvm::Instruction::Xor;
    // Fall through.
  case AtomicExpr::AO__c11_atomic_fetch_xor:
  case AtomicExpr::AO__atomic_fetch_xor:
    Op = llvm::AtomicRMWInst::Xor;
    break;

  case AtomicExpr::AO__atomic_nand_fetch:
    PostOp = llvm::Instruction::And;
    // Fall through.
  case AtomicExpr::AO__atomic_fetch_nand:
    Op = llvm::AtomicRMWInst::Nand;
    break;
  }

  llvm::LoadInst *LoadVal1 = CGF.Builder.CreateLoad(Val1);
  LoadVal1->setAlignment(Align);
  llvm::AtomicRMWInst *RMWI =
    CGF.Builder.CreateAtomicRMW(Op, Ptr, LoadVal1, Order);
  RMWI->setVolatile(E->isVolatile());

  // For __atomic_*_fetch operations, perform the operation again to
  // determine the value which was written.
  llvm::Value *Result = RMWI;
  if (PostOp)
    Result = CGF.Builder.CreateBinOp(PostOp, RMWI, LoadVal1);
  if (E->getOp() == AtomicExpr::AO__atomic_nand_fetch)
    Result = CGF.Builder.CreateNot(Result);
  llvm::StoreInst *StoreDest = CGF.Builder.CreateStore(Result, Dest);
  StoreDest->setAlignment(Align);
}
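
// For example, __atomic_add_fetch(p, n, order) on a 32-bit int lowers to
//   %old = atomicrmw add i32* %p, i32 %n <order>
//   %new = add i32 %old, %n
// with %new stored to Dest; __atomic_nand_fetch additionally negates the
// recomputed value, since nand(old, n) == ~(old & n).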

// This function emits any expression (scalar, complex, or aggregate)
// into a temporary alloca.
static llvm::Value *
EmitValToTemp(CodeGenFunction &CGF, Expr *E) {
  llvm::Value *DeclPtr = CGF.CreateMemTemp(E->getType(), ".atomictmp");
  CGF.EmitAnyExprToMem(E, DeclPtr, E->getType().getQualifiers(),
                       /*Init*/ true);
  return DeclPtr;
}

static void
AddDirectArgument(CodeGenFunction &CGF, CallArgList &Args,
                  bool UseOptimizedLibcall, llvm::Value *Val, QualType ValTy,
                  SourceLocation Loc) {
  if (UseOptimizedLibcall) {
    // Load value and pass it to the function directly.
    unsigned Align = CGF.getContext().getTypeAlignInChars(ValTy).getQuantity();
    Val = CGF.EmitLoadOfScalar(Val, false, Align, ValTy, Loc);
    Args.add(RValue::get(Val), ValTy);
  } else {
    // Non-optimized functions always take a reference.
    Args.add(RValue::get(CGF.EmitCastToVoidPtr(Val)),
             CGF.getContext().VoidPtrTy);
  }
}

RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) {
  QualType AtomicTy = E->getPtr()->getType()->getPointeeType();
  QualType MemTy = AtomicTy;
  if (const AtomicType *AT = AtomicTy->getAs<AtomicType>())
    MemTy = AT->getValueType();
  CharUnits sizeChars = getContext().getTypeSizeInChars(AtomicTy);
  uint64_t Size = sizeChars.getQuantity();
  CharUnits alignChars = getContext().getTypeAlignInChars(AtomicTy);
  unsigned Align = alignChars.getQuantity();
  unsigned MaxInlineWidthInBits =
    getTarget().getMaxAtomicInlineWidth();
  bool UseLibcall = (Size != Align ||
                     getContext().toBits(sizeChars) > MaxInlineWidthInBits);

  llvm::Value *Ptr, *Order, *OrderFail = 0, *Val1 = 0, *Val2 = 0;
  Ptr = EmitScalarExpr(E->getPtr());

  if (E->getOp() == AtomicExpr::AO__c11_atomic_init) {
    assert(!Dest && "Init does not return a value");
    LValue lvalue = LValue::MakeAddr(Ptr, AtomicTy, alignChars, getContext());
    EmitAtomicInit(E->getVal1(), lvalue);
    return RValue::get(0);
  }

  Order = EmitScalarExpr(E->getOrder());

  switch (E->getOp()) {
  case AtomicExpr::AO__c11_atomic_init:
    llvm_unreachable("Already handled!");

  case AtomicExpr::AO__c11_atomic_load:
  case AtomicExpr::AO__atomic_load_n:
    break;

  case AtomicExpr::AO__atomic_load:
    Dest = EmitScalarExpr(E->getVal1());
    break;

  case AtomicExpr::AO__atomic_store:
    Val1 = EmitScalarExpr(E->getVal1());
    break;

  case AtomicExpr::AO__atomic_exchange:
    Val1 = EmitScalarExpr(E->getVal1());
    Dest = EmitScalarExpr(E->getVal2());
    break;

  case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
  case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
  case AtomicExpr::AO__atomic_compare_exchange_n:
  case AtomicExpr::AO__atomic_compare_exchange:
    Val1 = EmitScalarExpr(E->getVal1());
    if (E->getOp() == AtomicExpr::AO__atomic_compare_exchange)
      Val2 = EmitScalarExpr(E->getVal2());
    else
      Val2 = EmitValToTemp(*this, E->getVal2());
    OrderFail = EmitScalarExpr(E->getOrderFail());
    // Evaluate and discard the 'weak' argument.
    if (E->getNumSubExprs() == 6)
      EmitScalarExpr(E->getWeak());
    break;

  case AtomicExpr::AO__c11_atomic_fetch_add:
  case AtomicExpr::AO__c11_atomic_fetch_sub:
    if (MemTy->isPointerType()) {
      // For pointer arithmetic, we're required to do a bit of math:
      // adding 1 to an int* is not the same as adding 1 to a uintptr_t.
      // ... but only for the C11 builtins. The GNU builtins expect the
      // user to multiply by sizeof(T).
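      // For example, if p has type _Atomic(int *), the C11 call
      // __c11_atomic_fetch_add(&p, 1, order) scales the 1 by sizeof(int)
      // here, whereas the GNU form would be written with the byte count
      // already applied: __atomic_fetch_add(&q, sizeof(int), order).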
      QualType Val1Ty = E->getVal1()->getType();
      llvm::Value *Val1Scalar = EmitScalarExpr(E->getVal1());
      CharUnits PointeeIncAmt =
        getContext().getTypeSizeInChars(MemTy->getPointeeType());
      Val1Scalar = Builder.CreateMul(Val1Scalar, CGM.getSize(PointeeIncAmt));
      Val1 = CreateMemTemp(Val1Ty, ".atomictmp");
      EmitStoreOfScalar(Val1Scalar, MakeAddrLValue(Val1, Val1Ty));
      break;
    }
    // Fall through.
  case AtomicExpr::AO__atomic_fetch_add:
  case AtomicExpr::AO__atomic_fetch_sub:
  case AtomicExpr::AO__atomic_add_fetch:
  case AtomicExpr::AO__atomic_sub_fetch:
  case AtomicExpr::AO__c11_atomic_store:
  case AtomicExpr::AO__c11_atomic_exchange:
  case AtomicExpr::AO__atomic_store_n:
  case AtomicExpr::AO__atomic_exchange_n:
  case AtomicExpr::AO__c11_atomic_fetch_and:
  case AtomicExpr::AO__c11_atomic_fetch_or:
  case AtomicExpr::AO__c11_atomic_fetch_xor:
  case AtomicExpr::AO__atomic_fetch_and:
  case AtomicExpr::AO__atomic_fetch_or:
  case AtomicExpr::AO__atomic_fetch_xor:
  case AtomicExpr::AO__atomic_fetch_nand:
  case AtomicExpr::AO__atomic_and_fetch:
  case AtomicExpr::AO__atomic_or_fetch:
  case AtomicExpr::AO__atomic_xor_fetch:
  case AtomicExpr::AO__atomic_nand_fetch:
    Val1 = EmitValToTemp(*this, E->getVal1());
    break;
  }

  if (!E->getType()->isVoidType() && !Dest)
    Dest = CreateMemTemp(E->getType(), ".atomicdst");

  // Use a library call.  See: http://gcc.gnu.org/wiki/Atomic/GCCMM/LIbrary .
  if (UseLibcall) {
    bool UseOptimizedLibcall = false;
    switch (E->getOp()) {
    case AtomicExpr::AO__c11_atomic_fetch_add:
    case AtomicExpr::AO__atomic_fetch_add:
    case AtomicExpr::AO__c11_atomic_fetch_and:
    case AtomicExpr::AO__atomic_fetch_and:
    case AtomicExpr::AO__c11_atomic_fetch_or:
    case AtomicExpr::AO__atomic_fetch_or:
    case AtomicExpr::AO__c11_atomic_fetch_sub:
    case AtomicExpr::AO__atomic_fetch_sub:
    case AtomicExpr::AO__c11_atomic_fetch_xor:
    case AtomicExpr::AO__atomic_fetch_xor:
      // For these, only library calls for certain sizes exist.
      UseOptimizedLibcall = true;
      break;
    default:
      // Only use optimized library calls for sizes for which they exist.
      if (Size == 1 || Size == 2 || Size == 4 || Size == 8)
        UseOptimizedLibcall = true;
      break;
    }

    CallArgList Args;
    if (!UseOptimizedLibcall) {
      // For non-optimized library calls, the size is the first parameter.
      Args.add(RValue::get(llvm::ConstantInt::get(SizeTy, Size)),
               getContext().getSizeType());
    }
    // The atomic address is the first or second parameter.
    Args.add(RValue::get(EmitCastToVoidPtr(Ptr)), getContext().VoidPtrTy);

    std::string LibCallName;
    QualType RetTy;
    bool HaveRetTy = false;
    switch (E->getOp()) {
    // There is only one libcall for compare and exchange, because there is
    // no optimisation benefit possible from a libcall version of a weak
    // compare and exchange.
    // bool __atomic_compare_exchange(size_t size, void *mem, void *expected,
    //                                void *desired, int success, int failure)
    // bool __atomic_compare_exchange_N(T *mem, T *expected, T desired,
    //                                  int success, int failure)
    case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
    case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
    case AtomicExpr::AO__atomic_compare_exchange:
    case AtomicExpr::AO__atomic_compare_exchange_n:
      LibCallName = "__atomic_compare_exchange";
      RetTy = getContext().BoolTy;
      HaveRetTy = true;
      Args.add(RValue::get(EmitCastToVoidPtr(Val1)), getContext().VoidPtrTy);
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val2, MemTy,
                        E->getExprLoc());
      Args.add(RValue::get(Order), getContext().IntTy);
      Order = OrderFail;
      break;
    // void __atomic_exchange(size_t size, void *mem, void *val, void *return,
    //                        int order)
    // T __atomic_exchange_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_exchange:
    case AtomicExpr::AO__atomic_exchange_n:
    case AtomicExpr::AO__atomic_exchange:
      LibCallName = "__atomic_exchange";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
                        E->getExprLoc());
      break;
    // void __atomic_store(size_t size, void *mem, void *val, int order)
    // void __atomic_store_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_store:
    case AtomicExpr::AO__atomic_store:
    case AtomicExpr::AO__atomic_store_n:
      LibCallName = "__atomic_store";
      RetTy = getContext().VoidTy;
      HaveRetTy = true;
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
                        E->getExprLoc());
      break;
    // void __atomic_load(size_t size, void *mem, void *return, int order)
    // T __atomic_load_N(T *mem, int order)
    case AtomicExpr::AO__c11_atomic_load:
    case AtomicExpr::AO__atomic_load:
    case AtomicExpr::AO__atomic_load_n:
      LibCallName = "__atomic_load";
      break;
    // T __atomic_fetch_add_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_fetch_add:
    case AtomicExpr::AO__atomic_fetch_add:
      LibCallName = "__atomic_fetch_add";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
                        E->getExprLoc());
      break;
    // T __atomic_fetch_and_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_fetch_and:
    case AtomicExpr::AO__atomic_fetch_and:
      LibCallName = "__atomic_fetch_and";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
                        E->getExprLoc());
      break;
    // T __atomic_fetch_or_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_fetch_or:
    case AtomicExpr::AO__atomic_fetch_or:
      LibCallName = "__atomic_fetch_or";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
                        E->getExprLoc());
      break;
    // T __atomic_fetch_sub_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_fetch_sub:
    case AtomicExpr::AO__atomic_fetch_sub:
      LibCallName = "__atomic_fetch_sub";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
                        E->getExprLoc());
      break;
    // T __atomic_fetch_xor_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_fetch_xor:
    case AtomicExpr::AO__atomic_fetch_xor:
      LibCallName = "__atomic_fetch_xor";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
                        E->getExprLoc());
      break;
    default: return EmitUnsupportedRValue(E, "atomic library call");
    }

    // Optimized functions have the size in their name.
    if (UseOptimizedLibcall)
      LibCallName += "_" + llvm::utostr(Size);
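
    // For example, a 4-byte fetch-add becomes a call to
    //   T __atomic_fetch_add_4(T *mem, T val, int order)
    // per the signatures sketched above, while sizes without an optimized
    // variant go through the generic form that takes the size as its first
    // argument.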
    // By default, assume we return a value of the atomic type.
    if (!HaveRetTy) {
      if (UseOptimizedLibcall) {
        // Value is returned directly.
        RetTy = MemTy;
      } else {
        // Value is returned through parameter before the order.
        RetTy = getContext().VoidTy;
        Args.add(RValue::get(EmitCastToVoidPtr(Dest)),
                 getContext().VoidPtrTy);
      }
    }
    // order is always the last parameter
    Args.add(RValue::get(Order),
             getContext().IntTy);

    const CGFunctionInfo &FuncInfo =
      CGM.getTypes().arrangeFreeFunctionCall(RetTy, Args,
          FunctionType::ExtInfo(), RequiredArgs::All);
    llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FuncInfo);
    llvm::Constant *Func = CGM.CreateRuntimeFunction(FTy, LibCallName);
    RValue Res = EmitCall(FuncInfo, Func, ReturnValueSlot(), Args);
    if (!RetTy->isVoidType())
      return Res;
    if (E->getType()->isVoidType())
      return RValue::get(0);
    return convertTempToRValue(Dest, E->getType(), E->getExprLoc());
  }

  bool IsStore = E->getOp() == AtomicExpr::AO__c11_atomic_store ||
                 E->getOp() == AtomicExpr::AO__atomic_store ||
                 E->getOp() == AtomicExpr::AO__atomic_store_n;
  bool IsLoad = E->getOp() == AtomicExpr::AO__c11_atomic_load ||
                E->getOp() == AtomicExpr::AO__atomic_load ||
                E->getOp() == AtomicExpr::AO__atomic_load_n;

  llvm::Type *IPtrTy =
    llvm::IntegerType::get(getLLVMContext(), Size * 8)->getPointerTo();
  llvm::Value *OrigDest = Dest;
  Ptr = Builder.CreateBitCast(Ptr, IPtrTy);
  if (Val1) Val1 = Builder.CreateBitCast(Val1, IPtrTy);
  if (Val2) Val2 = Builder.CreateBitCast(Val2, IPtrTy);
  if (Dest && !E->isCmpXChg()) Dest = Builder.CreateBitCast(Dest, IPtrTy);

  if (isa<llvm::ConstantInt>(Order)) {
    int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
    switch (ord) {
    case AO_ABI_memory_order_relaxed:
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
                   llvm::Monotonic);
      break;
    case AO_ABI_memory_order_consume:
    case AO_ABI_memory_order_acquire:
      if (IsStore)
        break; // Avoid crashing on code with undefined behavior
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
                   llvm::Acquire);
      break;
    case AO_ABI_memory_order_release:
      if (IsLoad)
        break; // Avoid crashing on code with undefined behavior
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
                   llvm::Release);
      break;
    case AO_ABI_memory_order_acq_rel:
      if (IsLoad || IsStore)
        break; // Avoid crashing on code with undefined behavior
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
                   llvm::AcquireRelease);
      break;
    case AO_ABI_memory_order_seq_cst:
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
                   llvm::SequentiallyConsistent);
      break;
    default: // invalid order
      // We should not ever get here normally, but it's hard to
      // enforce that in general.
      break;
    }
    if (E->getType()->isVoidType())
      return RValue::get(0);
    return convertTempToRValue(OrigDest, E->getType(), E->getExprLoc());
  }

  // Long case, when Order isn't obviously constant.

  // Create all the relevant BB's
  llvm::BasicBlock *MonotonicBB = 0, *AcquireBB = 0, *ReleaseBB = 0,
                   *AcqRelBB = 0, *SeqCstBB = 0;
  MonotonicBB = createBasicBlock("monotonic", CurFn);
  if (!IsStore)
    AcquireBB = createBasicBlock("acquire", CurFn);
  if (!IsLoad)
    ReleaseBB = createBasicBlock("release", CurFn);
  if (!IsLoad && !IsStore)
    AcqRelBB = createBasicBlock("acqrel", CurFn);
  SeqCstBB = createBasicBlock("seqcst", CurFn);
  llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);

  // Create the switch for the split
  // MonotonicBB is arbitrarily chosen as the default case; in practice, this
  // doesn't matter unless someone is crazy enough to use something that
  // doesn't fold to a constant for the ordering.
  Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
  llvm::SwitchInst *SI = Builder.CreateSwitch(Order, MonotonicBB);

  // Emit all the different atomics
  Builder.SetInsertPoint(MonotonicBB);
  EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
               llvm::Monotonic);
  Builder.CreateBr(ContBB);
  if (!IsStore) {
    Builder.SetInsertPoint(AcquireBB);
    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
                 llvm::Acquire);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32(1), AcquireBB);
    SI->addCase(Builder.getInt32(2), AcquireBB);
  }
  if (!IsLoad) {
    Builder.SetInsertPoint(ReleaseBB);
    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
                 llvm::Release);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32(3), ReleaseBB);
  }
  if (!IsLoad && !IsStore) {
    Builder.SetInsertPoint(AcqRelBB);
    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
                 llvm::AcquireRelease);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32(4), AcqRelBB);
  }
  Builder.SetInsertPoint(SeqCstBB);
  EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
               llvm::SequentiallyConsistent);
  Builder.CreateBr(ContBB);
  SI->addCase(Builder.getInt32(5), SeqCstBB);
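
  // The case values match AtomicOrderingKind above: consume (1) is treated
  // as acquire (2), release is 3, acq_rel is 4, and seq_cst is 5; anything
  // else falls through to the monotonic (relaxed) default block.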

  // Cleanup and return
  Builder.SetInsertPoint(ContBB);
  if (E->getType()->isVoidType())
    return RValue::get(0);
  return convertTempToRValue(OrigDest, E->getType(), E->getExprLoc());
}

llvm::Value *AtomicInfo::emitCastToAtomicIntPointer(llvm::Value *addr) const {
  unsigned addrspace =
    cast<llvm::PointerType>(addr->getType())->getAddressSpace();
  llvm::IntegerType *ty =
    llvm::IntegerType::get(CGF.getLLVMContext(), AtomicSizeInBits);
  return CGF.Builder.CreateBitCast(addr, ty->getPointerTo(addrspace));
}
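
// For example, with a 32-bit _Atomic(float) this rewrites a float* into an
// i32* (in the same address space), so the load, store, and cmpxchg paths
// can operate on an integer of exactly AtomicSizeInBits bits.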

RValue AtomicInfo::convertTempToRValue(llvm::Value *addr,
                                       AggValueSlot resultSlot,
                                       SourceLocation loc) const {
  if (EvaluationKind == TEK_Aggregate)
    return resultSlot.asRValue();

  // Drill into the padding structure if we have one.
  if (hasPadding())
    addr = CGF.Builder.CreateStructGEP(addr, 0);

  // Otherwise, just convert the temporary to an r-value using the
  // normal conversion routine.
  return CGF.convertTempToRValue(addr, getValueType(), loc);
}

/// Emit a load from an l-value of atomic type.  Note that the r-value
/// we produce is an r-value of the atomic *value* type.
RValue CodeGenFunction::EmitAtomicLoad(LValue src, SourceLocation loc,
                                       AggValueSlot resultSlot) {
  AtomicInfo atomics(*this, src);

  // Check whether we should use a library call.
  if (atomics.shouldUseLibcall()) {
    llvm::Value *tempAddr;
    if (!resultSlot.isIgnored()) {
      assert(atomics.getEvaluationKind() == TEK_Aggregate);
      tempAddr = resultSlot.getAddr();
    } else {
      tempAddr = CreateMemTemp(atomics.getAtomicType(), "atomic-load-temp");
    }

    // void __atomic_load(size_t size, void *mem, void *return, int order);
    CallArgList args;
    args.add(RValue::get(atomics.getAtomicSizeValue()),
             getContext().getSizeType());
    args.add(RValue::get(EmitCastToVoidPtr(src.getAddress())),
             getContext().VoidPtrTy);
    args.add(RValue::get(EmitCastToVoidPtr(tempAddr)),
             getContext().VoidPtrTy);
    args.add(RValue::get(llvm::ConstantInt::get(IntTy,
                                                AO_ABI_memory_order_seq_cst)),
             getContext().IntTy);
    emitAtomicLibcall(*this, "__atomic_load", getContext().VoidTy, args);

    // Produce the r-value.
    return atomics.convertTempToRValue(tempAddr, resultSlot, loc);
  }

  // Okay, we're doing this natively.
  llvm::Value *addr = atomics.emitCastToAtomicIntPointer(src.getAddress());
  llvm::LoadInst *load = Builder.CreateLoad(addr, "atomic-load");
  load->setAtomic(llvm::SequentiallyConsistent);

  // Other decoration.
  load->setAlignment(src.getAlignment().getQuantity());
  if (src.isVolatileQualified())
    load->setVolatile(true);
  if (src.getTBAAInfo())
    CGM.DecorateInstruction(load, src.getTBAAInfo());

  // Okay, turn that back into the original value type.
  QualType valueType = atomics.getValueType();
  llvm::Value *result = load;

  // If we're ignoring an aggregate return, don't do anything.
  if (atomics.getEvaluationKind() == TEK_Aggregate && resultSlot.isIgnored())
    return RValue::getAggregate(0, false);
  // The easiest way to do this is to go through memory, but we
  // try not to in some easy cases.
  if (atomics.getEvaluationKind() == TEK_Scalar && !atomics.hasPadding()) {
    llvm::Type *resultTy = CGM.getTypes().ConvertTypeForMem(valueType);
    if (isa<llvm::IntegerType>(resultTy)) {
      assert(result->getType() == resultTy);
      result = EmitFromMemory(result, valueType);
    } else if (isa<llvm::PointerType>(resultTy)) {
      result = Builder.CreateIntToPtr(result, resultTy);
    } else {
      result = Builder.CreateBitCast(result, resultTy);
    }
    return RValue::get(result);
  }

  // Create a temporary.  This needs to be big enough to hold the
  // atomic integer.
  llvm::Value *temp;
  bool tempIsVolatile = false;
  CharUnits tempAlignment;
  if (atomics.getEvaluationKind() == TEK_Aggregate) {
    assert(!resultSlot.isIgnored());
    temp = resultSlot.getAddr();
    tempAlignment = atomics.getValueAlignment();
    tempIsVolatile = resultSlot.isVolatile();
  } else {
    temp = CreateMemTemp(atomics.getAtomicType(), "atomic-load-temp");
    tempAlignment = atomics.getAtomicAlignment();
  }

  // Slam the integer into the temporary.
  llvm::Value *castTemp = atomics.emitCastToAtomicIntPointer(temp);
  Builder.CreateAlignedStore(result, castTemp, tempAlignment.getQuantity())
    ->setVolatile(tempIsVolatile);

  return atomics.convertTempToRValue(temp, resultSlot, loc);
}

/// Copy an r-value into memory as part of storing to an atomic type.
/// This needs to create a bit-pattern suitable for atomic operations.
void AtomicInfo::emitCopyIntoMemory(RValue rvalue, LValue dest) const {
  // If we have an r-value, the rvalue should be of the atomic type,
  // which means that the caller is responsible for having zeroed
  // any padding.  Just do an aggregate copy of that type.
  if (rvalue.isAggregate()) {
    CGF.EmitAggregateCopy(dest.getAddress(),
                          rvalue.getAggregateAddr(),
                          getAtomicType(),
                          (rvalue.isVolatileQualified()
                           || dest.isVolatileQualified()),
                          dest.getAlignment());
    return;
  }

  // Okay, otherwise we're copying stuff.

  // Zero out the buffer if necessary.
  emitMemSetZeroIfNecessary(dest);

  // Drill past the padding if present.
  dest = projectValue(dest);

  // Okay, store the rvalue in.
  if (rvalue.isScalar()) {
    CGF.EmitStoreOfScalar(rvalue.getScalarVal(), dest, /*init*/ true);
  } else {
    CGF.EmitStoreOfComplex(rvalue.getComplexVal(), dest, /*init*/ true);
  }
}

/// Materialize an r-value into memory for the purposes of storing it
/// to an atomic type.
llvm::Value *AtomicInfo::materializeRValue(RValue rvalue) const {
  // Aggregate r-values are already in memory, and EmitAtomicStore
  // requires them to be values of the atomic type.
  if (rvalue.isAggregate())
    return rvalue.getAggregateAddr();

  // Otherwise, make a temporary and materialize into it.
  llvm::Value *temp = CGF.CreateMemTemp(getAtomicType(), "atomic-store-temp");
  LValue tempLV = CGF.MakeAddrLValue(temp, getAtomicType(),
                                     getAtomicAlignment());
  emitCopyIntoMemory(rvalue, tempLV);
  return temp;
}

/// Emit a store to an l-value of atomic type.
///
/// Note that the r-value is expected to be an r-value *of the atomic
/// type*; this means that for aggregate r-values, it should include
/// storage for any padding that was necessary.
void CodeGenFunction::EmitAtomicStore(RValue rvalue, LValue dest, bool isInit) {
  // If this is an aggregate r-value, it should agree in type except
  // maybe for address-space qualification.
  assert(!rvalue.isAggregate() ||
         rvalue.getAggregateAddr()->getType()->getPointerElementType()
           == dest.getAddress()->getType()->getPointerElementType());

  AtomicInfo atomics(*this, dest);

  // If this is an initialization, just put the value there normally.
  if (isInit) {
    atomics.emitCopyIntoMemory(rvalue, dest);
    return;
  }

  // Check whether we should use a library call.
  if (atomics.shouldUseLibcall()) {
    // Produce a source address.
    llvm::Value *srcAddr = atomics.materializeRValue(rvalue);

    // void __atomic_store(size_t size, void *mem, void *val, int order)
    CallArgList args;
    args.add(RValue::get(atomics.getAtomicSizeValue()),
             getContext().getSizeType());
    args.add(RValue::get(EmitCastToVoidPtr(dest.getAddress())),
             getContext().VoidPtrTy);
    args.add(RValue::get(EmitCastToVoidPtr(srcAddr)),
             getContext().VoidPtrTy);
    args.add(RValue::get(llvm::ConstantInt::get(IntTy,
                                                AO_ABI_memory_order_seq_cst)),
             getContext().IntTy);
    emitAtomicLibcall(*this, "__atomic_store", getContext().VoidTy, args);
    return;
  }

  // Okay, we're doing this natively.
  llvm::Value *intValue;

  // If we've got a scalar value of the right size, try to avoid going
  // through memory.
  if (rvalue.isScalar() && !atomics.hasPadding()) {
    llvm::Value *value = rvalue.getScalarVal();
    if (isa<llvm::IntegerType>(value->getType())) {
      intValue = value;
    } else {
      llvm::IntegerType *inputIntTy =
        llvm::IntegerType::get(getLLVMContext(), atomics.getValueSizeInBits());
      if (isa<llvm::PointerType>(value->getType())) {
        intValue = Builder.CreatePtrToInt(value, inputIntTy);
      } else {
        intValue = Builder.CreateBitCast(value, inputIntTy);
      }
    }

  // Otherwise, we need to go through memory.
  } else {
    // Put the r-value in memory.
    llvm::Value *addr = atomics.materializeRValue(rvalue);

    // Cast the temporary to the atomic int type and pull a value out.
    addr = atomics.emitCastToAtomicIntPointer(addr);
    intValue = Builder.CreateAlignedLoad(addr,
                                  atomics.getAtomicAlignment().getQuantity());
  }

  // Do the atomic store.
  llvm::Value *addr = atomics.emitCastToAtomicIntPointer(dest.getAddress());
  llvm::StoreInst *store = Builder.CreateStore(intValue, addr);

  // Initializations don't need to be atomic.
  if (!isInit) store->setAtomic(llvm::SequentiallyConsistent);

  // Other decoration.
  store->setAlignment(dest.getAlignment().getQuantity());
  if (dest.isVolatileQualified())
    store->setVolatile(true);
  if (dest.getTBAAInfo())
    CGM.DecorateInstruction(store, dest.getTBAAInfo());
}
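
// In short: a no-padding scalar store (e.g. a float) is bitcast to an iN of
// the value size and emitted as a single seq_cst store, while padded or
// aggregate values are first materialized in memory and reloaded as the
// atomic integer type before the store.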

void CodeGenFunction::EmitAtomicInit(Expr *init, LValue dest) {
  AtomicInfo atomics(*this, dest);

  switch (atomics.getEvaluationKind()) {
  case TEK_Scalar: {
    llvm::Value *value = EmitScalarExpr(init);
    atomics.emitCopyIntoMemory(RValue::get(value), dest);
    return;
  }

  case TEK_Complex: {
    ComplexPairTy value = EmitComplexExpr(init);
    atomics.emitCopyIntoMemory(RValue::getComplex(value), dest);
    return;
  }

  case TEK_Aggregate: {
    // Fix up the destination if the initializer isn't an expression
    // of atomic type.
    bool Zeroed = false;
    if (!init->getType()->isAtomicType()) {
      Zeroed = atomics.emitMemSetZeroIfNecessary(dest);
      dest = atomics.projectValue(dest);
    }

    // Evaluate the expression directly into the destination.
    AggValueSlot slot = AggValueSlot::forLValue(dest,
                                        AggValueSlot::IsNotDestructed,
                                        AggValueSlot::DoesNotNeedGCBarriers,
                                        AggValueSlot::IsNotAliased,
                                        Zeroed ? AggValueSlot::IsZeroed :
                                                 AggValueSlot::IsNotZeroed);

    EmitAggExpr(init, slot);
    return;
  }
  }
  llvm_unreachable("bad evaluation kind");
}