//===--- CGAtomic.cpp - Emit LLVM IR for atomic operations ----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the code for emitting atomic operations.
//
//===----------------------------------------------------------------------===//

#include "CodeGenFunction.h"
#include "CGCall.h"
#include "CodeGenModule.h"
#include "clang/AST/ASTContext.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Operator.h"

using namespace clang;
using namespace CodeGen;

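// Helper class that gathers the layout facts needed to lower operations on a
// single atomic l-value: the atomic type and its underlying value type, their
// sizes and alignments, and whether the access is small and aligned enough to
// be lowered inline or must instead go through the __atomic_* runtime library.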
namespace {
  class AtomicInfo {
    CodeGenFunction &CGF;
    QualType AtomicTy;
    QualType ValueTy;
    uint64_t AtomicSizeInBits;
    uint64_t ValueSizeInBits;
    CharUnits AtomicAlign;
    CharUnits ValueAlign;
    CharUnits LValueAlign;
    TypeEvaluationKind EvaluationKind;
    bool UseLibcall;
  public:
    AtomicInfo(CodeGenFunction &CGF, LValue &lvalue) : CGF(CGF) {
      assert(lvalue.isSimple());

      AtomicTy = lvalue.getType();
      ValueTy = AtomicTy->castAs<AtomicType>()->getValueType();
      EvaluationKind = CGF.getEvaluationKind(ValueTy);

      ASTContext &C = CGF.getContext();

      uint64_t valueAlignInBits;
      std::tie(ValueSizeInBits, valueAlignInBits) = C.getTypeInfo(ValueTy);

      uint64_t atomicAlignInBits;
      std::tie(AtomicSizeInBits, atomicAlignInBits) = C.getTypeInfo(AtomicTy);

      assert(ValueSizeInBits <= AtomicSizeInBits);
      assert(valueAlignInBits <= atomicAlignInBits);

      AtomicAlign = C.toCharUnitsFromBits(atomicAlignInBits);
      ValueAlign = C.toCharUnitsFromBits(valueAlignInBits);
      if (lvalue.getAlignment().isZero())
        lvalue.setAlignment(AtomicAlign);

      UseLibcall =
        (AtomicSizeInBits > uint64_t(C.toBits(lvalue.getAlignment())) ||
         AtomicSizeInBits > C.getTargetInfo().getMaxAtomicInlineWidth());
    }

    QualType getAtomicType() const { return AtomicTy; }
    QualType getValueType() const { return ValueTy; }
    CharUnits getAtomicAlignment() const { return AtomicAlign; }
    CharUnits getValueAlignment() const { return ValueAlign; }
    uint64_t getAtomicSizeInBits() const { return AtomicSizeInBits; }
    uint64_t getValueSizeInBits() const { return ValueSizeInBits; }
    TypeEvaluationKind getEvaluationKind() const { return EvaluationKind; }
    bool shouldUseLibcall() const { return UseLibcall; }

    /// Is the atomic size larger than the underlying value type?
    ///
    /// Note that the absence of padding does not mean that atomic
    /// objects are completely interchangeable with non-atomic
    /// objects: we might have promoted the alignment of a type
    /// without making it bigger.
    bool hasPadding() const {
      return (ValueSizeInBits != AtomicSizeInBits);
    }

    bool emitMemSetZeroIfNecessary(LValue dest) const;

    llvm::Value *getAtomicSizeValue() const {
      CharUnits size = CGF.getContext().toCharUnitsFromBits(AtomicSizeInBits);
      return CGF.CGM.getSize(size);
    }

    /// Cast the given pointer to an integer pointer suitable for
    /// atomic operations.
    llvm::Value *emitCastToAtomicIntPointer(llvm::Value *addr) const;

    /// Turn an atomic-layout object into an r-value.
    RValue convertTempToRValue(llvm::Value *addr,
                               AggValueSlot resultSlot,
                               SourceLocation loc) const;

    /// Copy an atomic r-value into atomic-layout memory.
    void emitCopyIntoMemory(RValue rvalue, LValue lvalue) const;

    /// Project an l-value down to the value field.
    LValue projectValue(LValue lvalue) const {
      llvm::Value *addr = lvalue.getAddress();
      if (hasPadding())
        addr = CGF.Builder.CreateStructGEP(addr, 0);

      return LValue::MakeAddr(addr, getValueType(), lvalue.getAlignment(),
                              CGF.getContext(), lvalue.getTBAAInfo());
    }

    /// Materialize an atomic r-value in atomic-layout memory.
    llvm::Value *materializeRValue(RValue rvalue) const;

  private:
    bool requiresMemSetZero(llvm::Type *type) const;
  };
}

static RValue emitAtomicLibcall(CodeGenFunction &CGF,
                                StringRef fnName,
                                QualType resultType,
                                CallArgList &args) {
  const CGFunctionInfo &fnInfo =
    CGF.CGM.getTypes().arrangeFreeFunctionCall(resultType, args,
            FunctionType::ExtInfo(), RequiredArgs::All);
  llvm::FunctionType *fnTy = CGF.CGM.getTypes().GetFunctionType(fnInfo);
  llvm::Constant *fn = CGF.CGM.CreateRuntimeFunction(fnTy, fnName);
  return CGF.EmitCall(fnInfo, fn, ReturnValueSlot(), args);
}

/// Does a store of the given IR type modify the full expected width?
static bool isFullSizeType(CodeGenModule &CGM, llvm::Type *type,
                           uint64_t expectedSize) {
  return (CGM.getDataLayout().getTypeStoreSize(type) * 8 == expectedSize);
}

/// Does the atomic type require memsetting to zero before initialization?
///
/// The IR type is provided as a way of making certain queries faster.
bool AtomicInfo::requiresMemSetZero(llvm::Type *type) const {
  // If the atomic type has size padding, we definitely need a memset.
  if (hasPadding()) return true;

  // Otherwise, do some simple heuristics to try to avoid it:
  switch (getEvaluationKind()) {
  // For scalars and complexes, check whether the store size of the
  // type uses the full size.
  case TEK_Scalar:
    return !isFullSizeType(CGF.CGM, type, AtomicSizeInBits);
  case TEK_Complex:
    return !isFullSizeType(CGF.CGM, type->getStructElementType(0),
                           AtomicSizeInBits / 2);

  // Padding in structs has an undefined bit pattern.  User beware.
  case TEK_Aggregate:
    return false;
  }
  llvm_unreachable("bad evaluation kind");
}

bool AtomicInfo::emitMemSetZeroIfNecessary(LValue dest) const {
  llvm::Value *addr = dest.getAddress();
  if (!requiresMemSetZero(addr->getType()->getPointerElementType()))
    return false;

  CGF.Builder.CreateMemSet(addr, llvm::ConstantInt::get(CGF.Int8Ty, 0),
                           AtomicSizeInBits / 8,
                           dest.getAlignment().getQuantity());
  return true;
}

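// Emit a single atomic operation once the LLVM atomic ordering is known.  For
// example (roughly), __c11_atomic_fetch_add(&x, 1, memory_order_seq_cst) on a
// lock-free int reaches this function and is emitted as a seq_cst
// 'atomicrmw add' on Ptr, with the old value stored to Dest.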
static void
EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, llvm::Value *Dest,
             llvm::Value *Ptr, llvm::Value *Val1, llvm::Value *Val2,
             uint64_t Size, unsigned Align, llvm::AtomicOrdering Order) {
  llvm::AtomicRMWInst::BinOp Op = llvm::AtomicRMWInst::Add;
  llvm::Instruction::BinaryOps PostOp = (llvm::Instruction::BinaryOps)0;

  switch (E->getOp()) {
  case AtomicExpr::AO__c11_atomic_init:
    llvm_unreachable("Already handled!");

  case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
  case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
  case AtomicExpr::AO__atomic_compare_exchange:
  case AtomicExpr::AO__atomic_compare_exchange_n: {
    // Note that cmpxchg doesn't support weak cmpxchg, at least at the moment.
    llvm::LoadInst *Expected = CGF.Builder.CreateLoad(Val1);
    Expected->setAlignment(Align);
    llvm::LoadInst *Desired = CGF.Builder.CreateLoad(Val2);
    Desired->setAlignment(Align);
    llvm::AtomicOrdering FailureOrder =
        llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(Order);
    llvm::AtomicCmpXchgInst *Old = CGF.Builder.CreateAtomicCmpXchg(
        Ptr, Expected, Desired, Order, FailureOrder);
    Old->setVolatile(E->isVolatile());

    // Cmp holds the result of the compare-exchange operation: true on success,
    // false on failure.
    llvm::Value *Cmp = CGF.Builder.CreateICmpEQ(Old, Expected);

    // This basic block is used to hold the store instruction if the operation
    // failed.
    llvm::BasicBlock *StoreExpectedBB =
        CGF.createBasicBlock("cmpxchg.store_expected", CGF.CurFn);

    // This basic block is the exit point of the operation; we should end up
    // here regardless of whether or not the operation succeeded.
    llvm::BasicBlock *ContinueBB =
        CGF.createBasicBlock("cmpxchg.continue", CGF.CurFn);

    // Update Expected if Expected isn't equal to Old; otherwise, branch to the
    // exit point.
    CGF.Builder.CreateCondBr(Cmp, ContinueBB, StoreExpectedBB);

    CGF.Builder.SetInsertPoint(StoreExpectedBB);
    // Update the memory at Expected with Old's value.
    llvm::StoreInst *StoreExpected = CGF.Builder.CreateStore(Old, Val1);
    StoreExpected->setAlignment(Align);
    // Finally, branch to the exit point.
    CGF.Builder.CreateBr(ContinueBB);

    CGF.Builder.SetInsertPoint(ContinueBB);
    // Update the memory at Dest with Cmp's value.
    CGF.EmitStoreOfScalar(Cmp, CGF.MakeAddrLValue(Dest, E->getType()));
    return;
  }

  case AtomicExpr::AO__c11_atomic_load:
  case AtomicExpr::AO__atomic_load_n:
  case AtomicExpr::AO__atomic_load: {
    llvm::LoadInst *Load = CGF.Builder.CreateLoad(Ptr);
    Load->setAtomic(Order);
    Load->setAlignment(Size);
    Load->setVolatile(E->isVolatile());
    llvm::StoreInst *StoreDest = CGF.Builder.CreateStore(Load, Dest);
    StoreDest->setAlignment(Align);
    return;
  }

  case AtomicExpr::AO__c11_atomic_store:
  case AtomicExpr::AO__atomic_store:
  case AtomicExpr::AO__atomic_store_n: {
    assert(!Dest && "Store does not return a value");
    llvm::LoadInst *LoadVal1 = CGF.Builder.CreateLoad(Val1);
    LoadVal1->setAlignment(Align);
    llvm::StoreInst *Store = CGF.Builder.CreateStore(LoadVal1, Ptr);
    Store->setAtomic(Order);
    Store->setAlignment(Size);
    Store->setVolatile(E->isVolatile());
    return;
  }

  case AtomicExpr::AO__c11_atomic_exchange:
  case AtomicExpr::AO__atomic_exchange_n:
  case AtomicExpr::AO__atomic_exchange:
    Op = llvm::AtomicRMWInst::Xchg;
    break;

  case AtomicExpr::AO__atomic_add_fetch:
    PostOp = llvm::Instruction::Add;
    // Fall through.
  case AtomicExpr::AO__c11_atomic_fetch_add:
  case AtomicExpr::AO__atomic_fetch_add:
    Op = llvm::AtomicRMWInst::Add;
    break;

  case AtomicExpr::AO__atomic_sub_fetch:
    PostOp = llvm::Instruction::Sub;
    // Fall through.
  case AtomicExpr::AO__c11_atomic_fetch_sub:
  case AtomicExpr::AO__atomic_fetch_sub:
    Op = llvm::AtomicRMWInst::Sub;
    break;

  case AtomicExpr::AO__atomic_and_fetch:
    PostOp = llvm::Instruction::And;
    // Fall through.
  case AtomicExpr::AO__c11_atomic_fetch_and:
  case AtomicExpr::AO__atomic_fetch_and:
    Op = llvm::AtomicRMWInst::And;
    break;

  case AtomicExpr::AO__atomic_or_fetch:
    PostOp = llvm::Instruction::Or;
    // Fall through.
  case AtomicExpr::AO__c11_atomic_fetch_or:
  case AtomicExpr::AO__atomic_fetch_or:
    Op = llvm::AtomicRMWInst::Or;
    break;

  case AtomicExpr::AO__atomic_xor_fetch:
    PostOp = llvm::Instruction::Xor;
    // Fall through.
  case AtomicExpr::AO__c11_atomic_fetch_xor:
  case AtomicExpr::AO__atomic_fetch_xor:
    Op = llvm::AtomicRMWInst::Xor;
    break;

  case AtomicExpr::AO__atomic_nand_fetch:
    PostOp = llvm::Instruction::And;
    // Fall through.
  case AtomicExpr::AO__atomic_fetch_nand:
    Op = llvm::AtomicRMWInst::Nand;
    break;
  }

  llvm::LoadInst *LoadVal1 = CGF.Builder.CreateLoad(Val1);
  LoadVal1->setAlignment(Align);
  llvm::AtomicRMWInst *RMWI =
      CGF.Builder.CreateAtomicRMW(Op, Ptr, LoadVal1, Order);
  RMWI->setVolatile(E->isVolatile());

  // For __atomic_*_fetch operations, perform the operation again to
  // determine the value which was written.
  llvm::Value *Result = RMWI;
  if (PostOp)
    Result = CGF.Builder.CreateBinOp(PostOp, RMWI, LoadVal1);
  if (E->getOp() == AtomicExpr::AO__atomic_nand_fetch)
    Result = CGF.Builder.CreateNot(Result);
  llvm::StoreInst *StoreDest = CGF.Builder.CreateStore(Result, Dest);
  StoreDest->setAlignment(Align);
}

// This function emits any expression (scalar, complex, or aggregate)
// into a temporary alloca.
static llvm::Value *
EmitValToTemp(CodeGenFunction &CGF, Expr *E) {
  llvm::Value *DeclPtr = CGF.CreateMemTemp(E->getType(), ".atomictmp");
  CGF.EmitAnyExprToMem(E, DeclPtr, E->getType().getQualifiers(),
                       /*Init*/ true);
  return DeclPtr;
}

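// Add the value operand for an atomic libcall to the argument list.  The
// optimized, size-suffixed libcalls (e.g. __atomic_fetch_add_4) take the
// value by value, while the generic libcalls take a pointer to it.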
static void
AddDirectArgument(CodeGenFunction &CGF, CallArgList &Args,
                  bool UseOptimizedLibcall, llvm::Value *Val, QualType ValTy,
                  SourceLocation Loc) {
  if (UseOptimizedLibcall) {
    // Load value and pass it to the function directly.
    unsigned Align = CGF.getContext().getTypeAlignInChars(ValTy).getQuantity();
    Val = CGF.EmitLoadOfScalar(Val, false, Align, ValTy, Loc);
    Args.add(RValue::get(Val), ValTy);
  } else {
    // Non-optimized functions always take a reference.
    Args.add(RValue::get(CGF.EmitCastToVoidPtr(Val)),
             CGF.getContext().VoidPtrTy);
  }
}

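// Emit an AtomicExpr, i.e. one of the C11 _Atomic operations or GNU __atomic
// builtins.  For example (roughly),
//   _Atomic(int) counter;
//   __c11_atomic_fetch_add(&counter, 1, memory_order_relaxed);
// is emitted inline as a monotonic 'atomicrmw add' when the object is small
// and aligned enough to be lock-free; otherwise the operation is lowered to a
// call into the __atomic_* runtime library.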
RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) {
  QualType AtomicTy = E->getPtr()->getType()->getPointeeType();
  QualType MemTy = AtomicTy;
  if (const AtomicType *AT = AtomicTy->getAs<AtomicType>())
    MemTy = AT->getValueType();
  CharUnits sizeChars = getContext().getTypeSizeInChars(AtomicTy);
  uint64_t Size = sizeChars.getQuantity();
  CharUnits alignChars = getContext().getTypeAlignInChars(AtomicTy);
  unsigned Align = alignChars.getQuantity();
  unsigned MaxInlineWidthInBits =
      getTarget().getMaxAtomicInlineWidth();
  bool UseLibcall = (Size != Align ||
                     getContext().toBits(sizeChars) > MaxInlineWidthInBits);

  llvm::Value *Ptr, *Order, *OrderFail = 0, *Val1 = 0, *Val2 = 0;
  Ptr = EmitScalarExpr(E->getPtr());

  if (E->getOp() == AtomicExpr::AO__c11_atomic_init) {
    assert(!Dest && "Init does not return a value");
    LValue lvalue = LValue::MakeAddr(Ptr, AtomicTy, alignChars, getContext());
    EmitAtomicInit(E->getVal1(), lvalue);
    return RValue::get(0);
  }

  Order = EmitScalarExpr(E->getOrder());

  switch (E->getOp()) {
  case AtomicExpr::AO__c11_atomic_init:
    llvm_unreachable("Already handled!");

  case AtomicExpr::AO__c11_atomic_load:
  case AtomicExpr::AO__atomic_load_n:
    break;

  case AtomicExpr::AO__atomic_load:
    Dest = EmitScalarExpr(E->getVal1());
    break;

  case AtomicExpr::AO__atomic_store:
    Val1 = EmitScalarExpr(E->getVal1());
    break;

  case AtomicExpr::AO__atomic_exchange:
    Val1 = EmitScalarExpr(E->getVal1());
    Dest = EmitScalarExpr(E->getVal2());
    break;

  case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
  case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
  case AtomicExpr::AO__atomic_compare_exchange_n:
  case AtomicExpr::AO__atomic_compare_exchange:
    Val1 = EmitScalarExpr(E->getVal1());
    if (E->getOp() == AtomicExpr::AO__atomic_compare_exchange)
      Val2 = EmitScalarExpr(E->getVal2());
    else
      Val2 = EmitValToTemp(*this, E->getVal2());
    OrderFail = EmitScalarExpr(E->getOrderFail());
    // Evaluate and discard the 'weak' argument.
    if (E->getNumSubExprs() == 6)
      EmitScalarExpr(E->getWeak());
    break;

  case AtomicExpr::AO__c11_atomic_fetch_add:
  case AtomicExpr::AO__c11_atomic_fetch_sub:
    if (MemTy->isPointerType()) {
      // For pointer arithmetic, we're required to do a bit of math:
      // adding 1 to an int* is not the same as adding 1 to a uintptr_t.
      // ... but only for the C11 builtins. The GNU builtins expect the
      // user to multiply by sizeof(T).
      QualType Val1Ty = E->getVal1()->getType();
      llvm::Value *Val1Scalar = EmitScalarExpr(E->getVal1());
      CharUnits PointeeIncAmt =
        getContext().getTypeSizeInChars(MemTy->getPointeeType());
      Val1Scalar = Builder.CreateMul(Val1Scalar, CGM.getSize(PointeeIncAmt));
      Val1 = CreateMemTemp(Val1Ty, ".atomictmp");
      EmitStoreOfScalar(Val1Scalar, MakeAddrLValue(Val1, Val1Ty));
      break;
    }
    // Fall through.
  case AtomicExpr::AO__atomic_fetch_add:
  case AtomicExpr::AO__atomic_fetch_sub:
  case AtomicExpr::AO__atomic_add_fetch:
  case AtomicExpr::AO__atomic_sub_fetch:
  case AtomicExpr::AO__c11_atomic_store:
  case AtomicExpr::AO__c11_atomic_exchange:
  case AtomicExpr::AO__atomic_store_n:
  case AtomicExpr::AO__atomic_exchange_n:
  case AtomicExpr::AO__c11_atomic_fetch_and:
  case AtomicExpr::AO__c11_atomic_fetch_or:
  case AtomicExpr::AO__c11_atomic_fetch_xor:
  case AtomicExpr::AO__atomic_fetch_and:
  case AtomicExpr::AO__atomic_fetch_or:
  case AtomicExpr::AO__atomic_fetch_xor:
  case AtomicExpr::AO__atomic_fetch_nand:
  case AtomicExpr::AO__atomic_and_fetch:
  case AtomicExpr::AO__atomic_or_fetch:
  case AtomicExpr::AO__atomic_xor_fetch:
  case AtomicExpr::AO__atomic_nand_fetch:
    Val1 = EmitValToTemp(*this, E->getVal1());
    break;
  }

  if (!E->getType()->isVoidType() && !Dest)
    Dest = CreateMemTemp(E->getType(), ".atomicdst");

  // Use a library call.  See: http://gcc.gnu.org/wiki/Atomic/GCCMM/LIbrary .
  if (UseLibcall) {
    bool UseOptimizedLibcall = false;
    switch (E->getOp()) {
    case AtomicExpr::AO__c11_atomic_fetch_add:
    case AtomicExpr::AO__atomic_fetch_add:
    case AtomicExpr::AO__c11_atomic_fetch_and:
    case AtomicExpr::AO__atomic_fetch_and:
    case AtomicExpr::AO__c11_atomic_fetch_or:
    case AtomicExpr::AO__atomic_fetch_or:
    case AtomicExpr::AO__c11_atomic_fetch_sub:
    case AtomicExpr::AO__atomic_fetch_sub:
    case AtomicExpr::AO__c11_atomic_fetch_xor:
    case AtomicExpr::AO__atomic_fetch_xor:
      // For these, only library calls for certain sizes exist.
      UseOptimizedLibcall = true;
      break;
    default:
      // Only use optimized library calls for sizes for which they exist.
      if (Size == 1 || Size == 2 || Size == 4 || Size == 8)
        UseOptimizedLibcall = true;
      break;
    }

    CallArgList Args;
    if (!UseOptimizedLibcall) {
      // For non-optimized library calls, the size is the first parameter
      Args.add(RValue::get(llvm::ConstantInt::get(SizeTy, Size)),
               getContext().getSizeType());
    }
    // Atomic address is the first or second parameter
    Args.add(RValue::get(EmitCastToVoidPtr(Ptr)), getContext().VoidPtrTy);

    std::string LibCallName;
    QualType RetTy;
    bool HaveRetTy = false;
    switch (E->getOp()) {
    // There is only one libcall for compare and exchange, because there is no
    // optimisation benefit possible from a libcall version of a weak compare
    // and exchange.
    // bool __atomic_compare_exchange(size_t size, void *mem, void *expected,
    //                                void *desired, int success, int failure)
    // bool __atomic_compare_exchange_N(T *mem, T *expected, T desired,
    //                                  int success, int failure)
    case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
    case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
    case AtomicExpr::AO__atomic_compare_exchange:
    case AtomicExpr::AO__atomic_compare_exchange_n:
      LibCallName = "__atomic_compare_exchange";
      RetTy = getContext().BoolTy;
      HaveRetTy = true;
      Args.add(RValue::get(EmitCastToVoidPtr(Val1)), getContext().VoidPtrTy);
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val2, MemTy,
                        E->getExprLoc());
      Args.add(RValue::get(Order), getContext().IntTy);
      Order = OrderFail;
      break;
    // void __atomic_exchange(size_t size, void *mem, void *val, void *return,
    //                        int order)
    // T __atomic_exchange_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_exchange:
    case AtomicExpr::AO__atomic_exchange_n:
    case AtomicExpr::AO__atomic_exchange:
      LibCallName = "__atomic_exchange";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
                        E->getExprLoc());
      break;
    // void __atomic_store(size_t size, void *mem, void *val, int order)
    // void __atomic_store_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_store:
    case AtomicExpr::AO__atomic_store:
    case AtomicExpr::AO__atomic_store_n:
      LibCallName = "__atomic_store";
      RetTy = getContext().VoidTy;
      HaveRetTy = true;
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
                        E->getExprLoc());
      break;
    // void __atomic_load(size_t size, void *mem, void *return, int order)
    // T __atomic_load_N(T *mem, int order)
    case AtomicExpr::AO__c11_atomic_load:
    case AtomicExpr::AO__atomic_load:
    case AtomicExpr::AO__atomic_load_n:
      LibCallName = "__atomic_load";
      break;
    // T __atomic_fetch_add_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_fetch_add:
    case AtomicExpr::AO__atomic_fetch_add:
      LibCallName = "__atomic_fetch_add";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
                        E->getExprLoc());
      break;
    // T __atomic_fetch_and_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_fetch_and:
    case AtomicExpr::AO__atomic_fetch_and:
      LibCallName = "__atomic_fetch_and";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
                        E->getExprLoc());
      break;
    // T __atomic_fetch_or_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_fetch_or:
    case AtomicExpr::AO__atomic_fetch_or:
      LibCallName = "__atomic_fetch_or";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
                        E->getExprLoc());
      break;
    // T __atomic_fetch_sub_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_fetch_sub:
    case AtomicExpr::AO__atomic_fetch_sub:
      LibCallName = "__atomic_fetch_sub";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
                        E->getExprLoc());
      break;
    // T __atomic_fetch_xor_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_fetch_xor:
    case AtomicExpr::AO__atomic_fetch_xor:
      LibCallName = "__atomic_fetch_xor";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
                        E->getExprLoc());
      break;
    default: return EmitUnsupportedRValue(E, "atomic library call");
    }

    // Optimized functions have the size in their name.
    if (UseOptimizedLibcall)
      LibCallName += "_" + llvm::utostr(Size);
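    // (e.g. "__atomic_fetch_add" becomes "__atomic_fetch_add_4" for a
    // four-byte operand.)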
    // By default, assume we return a value of the atomic type.
    if (!HaveRetTy) {
      if (UseOptimizedLibcall) {
        // Value is returned directly.
        RetTy = MemTy;
      } else {
        // Value is returned through parameter before the order.
        RetTy = getContext().VoidTy;
        Args.add(RValue::get(EmitCastToVoidPtr(Dest)),
                 getContext().VoidPtrTy);
      }
    }
    // order is always the last parameter
    Args.add(RValue::get(Order),
             getContext().IntTy);

    const CGFunctionInfo &FuncInfo =
        CGM.getTypes().arrangeFreeFunctionCall(RetTy, Args,
            FunctionType::ExtInfo(), RequiredArgs::All);
    llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FuncInfo);
    llvm::Constant *Func = CGM.CreateRuntimeFunction(FTy, LibCallName);
    RValue Res = EmitCall(FuncInfo, Func, ReturnValueSlot(), Args);
    if (!RetTy->isVoidType())
      return Res;
    if (E->getType()->isVoidType())
      return RValue::get(0);
    return convertTempToRValue(Dest, E->getType(), E->getExprLoc());
  }

  bool IsStore = E->getOp() == AtomicExpr::AO__c11_atomic_store ||
                 E->getOp() == AtomicExpr::AO__atomic_store ||
                 E->getOp() == AtomicExpr::AO__atomic_store_n;
  bool IsLoad = E->getOp() == AtomicExpr::AO__c11_atomic_load ||
                E->getOp() == AtomicExpr::AO__atomic_load ||
                E->getOp() == AtomicExpr::AO__atomic_load_n;

  llvm::Type *IPtrTy =
      llvm::IntegerType::get(getLLVMContext(), Size * 8)->getPointerTo();
  llvm::Value *OrigDest = Dest;
  Ptr = Builder.CreateBitCast(Ptr, IPtrTy);
  if (Val1) Val1 = Builder.CreateBitCast(Val1, IPtrTy);
  if (Val2) Val2 = Builder.CreateBitCast(Val2, IPtrTy);
  if (Dest && !E->isCmpXChg()) Dest = Builder.CreateBitCast(Dest, IPtrTy);

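  // If the ordering is a compile-time constant, emit a single atomic
  // operation with the matching LLVM ordering (e.g. memory_order_acquire
  // becomes llvm::Acquire, memory_order_seq_cst becomes
  // llvm::SequentiallyConsistent).  Orderings that would be invalid for the
  // operation, such as an acquire store, simply emit nothing.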
  if (isa<llvm::ConstantInt>(Order)) {
    int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
    switch (ord) {
    case AtomicExpr::AO_ABI_memory_order_relaxed:
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
                   llvm::Monotonic);
      break;
    case AtomicExpr::AO_ABI_memory_order_consume:
    case AtomicExpr::AO_ABI_memory_order_acquire:
      if (IsStore)
        break; // Avoid crashing on code with undefined behavior
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
                   llvm::Acquire);
      break;
    case AtomicExpr::AO_ABI_memory_order_release:
      if (IsLoad)
        break; // Avoid crashing on code with undefined behavior
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
                   llvm::Release);
      break;
    case AtomicExpr::AO_ABI_memory_order_acq_rel:
      if (IsLoad || IsStore)
        break; // Avoid crashing on code with undefined behavior
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
                   llvm::AcquireRelease);
      break;
    case AtomicExpr::AO_ABI_memory_order_seq_cst:
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
                   llvm::SequentiallyConsistent);
      break;
    default: // invalid order
      // We should not ever get here normally, but it's hard to
      // enforce that in general.
      break;
    }
    if (E->getType()->isVoidType())
      return RValue::get(0);
    return convertTempToRValue(OrigDest, E->getType(), E->getExprLoc());
  }

  // Long case, when Order isn't obviously constant.

  // Create all the relevant BB's
  llvm::BasicBlock *MonotonicBB = 0, *AcquireBB = 0, *ReleaseBB = 0,
                   *AcqRelBB = 0, *SeqCstBB = 0;
  MonotonicBB = createBasicBlock("monotonic", CurFn);
  if (!IsStore)
    AcquireBB = createBasicBlock("acquire", CurFn);
  if (!IsLoad)
    ReleaseBB = createBasicBlock("release", CurFn);
  if (!IsLoad && !IsStore)
    AcqRelBB = createBasicBlock("acqrel", CurFn);
  SeqCstBB = createBasicBlock("seqcst", CurFn);
  llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);

  // Create the switch for the split
  // MonotonicBB is arbitrarily chosen as the default case; in practice, this
  // doesn't matter unless someone is crazy enough to use something that
  // doesn't fold to a constant for the ordering.
  Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
  llvm::SwitchInst *SI = Builder.CreateSwitch(Order, MonotonicBB);

  // Emit all the different atomics
  Builder.SetInsertPoint(MonotonicBB);
  EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
               llvm::Monotonic);
  Builder.CreateBr(ContBB);
  if (!IsStore) {
    Builder.SetInsertPoint(AcquireBB);
    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
                 llvm::Acquire);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32(1), AcquireBB);
    SI->addCase(Builder.getInt32(2), AcquireBB);
  }
  if (!IsLoad) {
    Builder.SetInsertPoint(ReleaseBB);
    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
                 llvm::Release);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32(3), ReleaseBB);
  }
  if (!IsLoad && !IsStore) {
    Builder.SetInsertPoint(AcqRelBB);
    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
                 llvm::AcquireRelease);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32(4), AcqRelBB);
  }
  Builder.SetInsertPoint(SeqCstBB);
  EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
               llvm::SequentiallyConsistent);
  Builder.CreateBr(ContBB);
  SI->addCase(Builder.getInt32(5), SeqCstBB);

  // Cleanup and return
  Builder.SetInsertPoint(ContBB);
  if (E->getType()->isVoidType())
    return RValue::get(0);
  return convertTempToRValue(OrigDest, E->getType(), E->getExprLoc());
}

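// Atomic loads and stores are performed on an integer of the full atomic
// width, so operand addresses are reinterpreted as pointers to that integer
// type (in the same address space).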
llvm::Value *AtomicInfo::emitCastToAtomicIntPointer(llvm::Value *addr) const {
  unsigned addrspace =
    cast<llvm::PointerType>(addr->getType())->getAddressSpace();
  llvm::IntegerType *ty =
    llvm::IntegerType::get(CGF.getLLVMContext(), AtomicSizeInBits);
  return CGF.Builder.CreateBitCast(addr, ty->getPointerTo(addrspace));
}

RValue AtomicInfo::convertTempToRValue(llvm::Value *addr,
                                       AggValueSlot resultSlot,
                                       SourceLocation loc) const {
  if (EvaluationKind == TEK_Aggregate)
    return resultSlot.asRValue();

  // Drill into the padding structure if we have one.
  if (hasPadding())
    addr = CGF.Builder.CreateStructGEP(addr, 0);

  // Otherwise, just convert the temporary to an r-value using the
  // normal conversion routine.
  return CGF.convertTempToRValue(addr, getValueType(), loc);
}

/// Emit a load from an l-value of atomic type.  Note that the r-value
/// we produce is an r-value of the atomic *value* type.
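/// For example, a load from an '_Atomic(float)' l-value yields an r-value of
/// type 'float'.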
RValue CodeGenFunction::EmitAtomicLoad(LValue src, SourceLocation loc,
                                       AggValueSlot resultSlot) {
  AtomicInfo atomics(*this, src);

  // Check whether we should use a library call.
  if (atomics.shouldUseLibcall()) {
    llvm::Value *tempAddr;
    if (!resultSlot.isIgnored()) {
      assert(atomics.getEvaluationKind() == TEK_Aggregate);
      tempAddr = resultSlot.getAddr();
    } else {
      tempAddr = CreateMemTemp(atomics.getAtomicType(), "atomic-load-temp");
    }

    // void __atomic_load(size_t size, void *mem, void *return, int order);
    CallArgList args;
    args.add(RValue::get(atomics.getAtomicSizeValue()),
             getContext().getSizeType());
    args.add(RValue::get(EmitCastToVoidPtr(src.getAddress())),
             getContext().VoidPtrTy);
    args.add(RValue::get(EmitCastToVoidPtr(tempAddr)),
             getContext().VoidPtrTy);
    args.add(RValue::get(llvm::ConstantInt::get(
                 IntTy, AtomicExpr::AO_ABI_memory_order_seq_cst)),
             getContext().IntTy);
    emitAtomicLibcall(*this, "__atomic_load", getContext().VoidTy, args);

    // Produce the r-value.
    return atomics.convertTempToRValue(tempAddr, resultSlot, loc);
  }

  // Okay, we're doing this natively.
  llvm::Value *addr = atomics.emitCastToAtomicIntPointer(src.getAddress());
  llvm::LoadInst *load = Builder.CreateLoad(addr, "atomic-load");
  load->setAtomic(llvm::SequentiallyConsistent);

  // Other decoration.
  load->setAlignment(src.getAlignment().getQuantity());
  if (src.isVolatileQualified())
    load->setVolatile(true);
  if (src.getTBAAInfo())
    CGM.DecorateInstruction(load, src.getTBAAInfo());

  // Okay, turn that back into the original value type.
  QualType valueType = atomics.getValueType();
  llvm::Value *result = load;

  // If we're ignoring an aggregate return, don't do anything.
  if (atomics.getEvaluationKind() == TEK_Aggregate && resultSlot.isIgnored())
    return RValue::getAggregate(0, false);

  // The easiest way to do this is to go through memory, but we
  // try not to in some easy cases.
  if (atomics.getEvaluationKind() == TEK_Scalar && !atomics.hasPadding()) {
    llvm::Type *resultTy = CGM.getTypes().ConvertTypeForMem(valueType);
    if (isa<llvm::IntegerType>(resultTy)) {
      assert(result->getType() == resultTy);
      result = EmitFromMemory(result, valueType);
    } else if (isa<llvm::PointerType>(resultTy)) {
      result = Builder.CreateIntToPtr(result, resultTy);
    } else {
      result = Builder.CreateBitCast(result, resultTy);
    }
    return RValue::get(result);
  }

  // Create a temporary.  This needs to be big enough to hold the
  // atomic integer.
  llvm::Value *temp;
  bool tempIsVolatile = false;
  CharUnits tempAlignment;
  if (atomics.getEvaluationKind() == TEK_Aggregate) {
    assert(!resultSlot.isIgnored());
    temp = resultSlot.getAddr();
    tempAlignment = atomics.getValueAlignment();
    tempIsVolatile = resultSlot.isVolatile();
  } else {
    temp = CreateMemTemp(atomics.getAtomicType(), "atomic-load-temp");
    tempAlignment = atomics.getAtomicAlignment();
  }

  // Slam the integer into the temporary.
  llvm::Value *castTemp = atomics.emitCastToAtomicIntPointer(temp);
  Builder.CreateAlignedStore(result, castTemp, tempAlignment.getQuantity())
    ->setVolatile(tempIsVolatile);

  return atomics.convertTempToRValue(temp, resultSlot, loc);
}

/// Copy an r-value into memory as part of storing to an atomic type.
/// This needs to create a bit-pattern suitable for atomic operations.
void AtomicInfo::emitCopyIntoMemory(RValue rvalue, LValue dest) const {
  // If we have an r-value, the rvalue should be of the atomic type,
  // which means that the caller is responsible for having zeroed
  // any padding.  Just do an aggregate copy of that type.
  if (rvalue.isAggregate()) {
    CGF.EmitAggregateCopy(dest.getAddress(),
                          rvalue.getAggregateAddr(),
                          getAtomicType(),
                          (rvalue.isVolatileQualified()
                           || dest.isVolatileQualified()),
                          dest.getAlignment());
    return;
  }

  // Okay, otherwise we're copying stuff.

  // Zero out the buffer if necessary.
  emitMemSetZeroIfNecessary(dest);

  // Drill past the padding if present.
  dest = projectValue(dest);

  // Okay, store the rvalue in.
  if (rvalue.isScalar()) {
    CGF.EmitStoreOfScalar(rvalue.getScalarVal(), dest, /*init*/ true);
  } else {
    CGF.EmitStoreOfComplex(rvalue.getComplexVal(), dest, /*init*/ true);
  }
}

/// Materialize an r-value into memory for the purposes of storing it
/// to an atomic type.
llvm::Value *AtomicInfo::materializeRValue(RValue rvalue) const {
  // Aggregate r-values are already in memory, and EmitAtomicStore
  // requires them to be values of the atomic type.
  if (rvalue.isAggregate())
    return rvalue.getAggregateAddr();

  // Otherwise, make a temporary and materialize into it.
  llvm::Value *temp = CGF.CreateMemTemp(getAtomicType(), "atomic-store-temp");
  LValue tempLV = CGF.MakeAddrLValue(temp, getAtomicType(),
                                     getAtomicAlignment());
  emitCopyIntoMemory(rvalue, tempLV);
  return temp;
}

/// Emit a store to an l-value of atomic type.
///
/// Note that the r-value is expected to be an r-value *of the atomic
/// type*; this means that for aggregate r-values, it should include
/// storage for any padding that was necessary.
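///
/// For example, a seq_cst store to a lock-free, naturally aligned atomic int
/// is emitted as a single atomic 'store' instruction, while an over-sized or
/// under-aligned atomic is routed through the generic __atomic_store libcall.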
void CodeGenFunction::EmitAtomicStore(RValue rvalue, LValue dest, bool isInit) {
  // If this is an aggregate r-value, it should agree in type except
  // maybe for address-space qualification.
  assert(!rvalue.isAggregate() ||
         rvalue.getAggregateAddr()->getType()->getPointerElementType()
           == dest.getAddress()->getType()->getPointerElementType());

  AtomicInfo atomics(*this, dest);

  // If this is an initialization, just put the value there normally.
  if (isInit) {
    atomics.emitCopyIntoMemory(rvalue, dest);
    return;
  }

  // Check whether we should use a library call.
  if (atomics.shouldUseLibcall()) {
    // Produce a source address.
    llvm::Value *srcAddr = atomics.materializeRValue(rvalue);

    // void __atomic_store(size_t size, void *mem, void *val, int order)
    CallArgList args;
    args.add(RValue::get(atomics.getAtomicSizeValue()),
             getContext().getSizeType());
    args.add(RValue::get(EmitCastToVoidPtr(dest.getAddress())),
             getContext().VoidPtrTy);
    args.add(RValue::get(EmitCastToVoidPtr(srcAddr)),
             getContext().VoidPtrTy);
    args.add(RValue::get(llvm::ConstantInt::get(
                 IntTy, AtomicExpr::AO_ABI_memory_order_seq_cst)),
             getContext().IntTy);
    emitAtomicLibcall(*this, "__atomic_store", getContext().VoidTy, args);
    return;
  }

  // Okay, we're doing this natively.
  llvm::Value *intValue;

  // If we've got a scalar value of the right size, try to avoid going
  // through memory.
  if (rvalue.isScalar() && !atomics.hasPadding()) {
    llvm::Value *value = rvalue.getScalarVal();
    if (isa<llvm::IntegerType>(value->getType())) {
      intValue = value;
    } else {
      llvm::IntegerType *inputIntTy =
        llvm::IntegerType::get(getLLVMContext(), atomics.getValueSizeInBits());
      if (isa<llvm::PointerType>(value->getType())) {
        intValue = Builder.CreatePtrToInt(value, inputIntTy);
      } else {
        intValue = Builder.CreateBitCast(value, inputIntTy);
      }
    }

  // Otherwise, we need to go through memory.
  } else {
    // Put the r-value in memory.
    llvm::Value *addr = atomics.materializeRValue(rvalue);

    // Cast the temporary to the atomic int type and pull a value out.
    addr = atomics.emitCastToAtomicIntPointer(addr);
    intValue = Builder.CreateAlignedLoad(addr,
                          atomics.getAtomicAlignment().getQuantity());
  }

  // Do the atomic store.
  llvm::Value *addr = atomics.emitCastToAtomicIntPointer(dest.getAddress());
  llvm::StoreInst *store = Builder.CreateStore(intValue, addr);

  // Initializations don't need to be atomic.
  if (!isInit) store->setAtomic(llvm::SequentiallyConsistent);

  // Other decoration.
  store->setAlignment(dest.getAlignment().getQuantity());
  if (dest.isVolatileQualified())
    store->setVolatile(true);
  if (dest.getTBAAInfo())
    CGM.DecorateInstruction(store, dest.getTBAAInfo());
}

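// Emit the initialization of an object of atomic type, e.g. (roughly)
// '_Atomic(int) x = 42;'.  Initialization does not itself need to be atomic,
// so the value is simply copied into place (zeroing padding where needed).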
void CodeGenFunction::EmitAtomicInit(Expr *init, LValue dest) {
  AtomicInfo atomics(*this, dest);

  switch (atomics.getEvaluationKind()) {
  case TEK_Scalar: {
    llvm::Value *value = EmitScalarExpr(init);
    atomics.emitCopyIntoMemory(RValue::get(value), dest);
    return;
  }

  case TEK_Complex: {
    ComplexPairTy value = EmitComplexExpr(init);
    atomics.emitCopyIntoMemory(RValue::getComplex(value), dest);
    return;
  }

  case TEK_Aggregate: {
    // Fix up the destination if the initializer isn't an expression
    // of atomic type.
    bool Zeroed = false;
    if (!init->getType()->isAtomicType()) {
      Zeroed = atomics.emitMemSetZeroIfNecessary(dest);
      dest = atomics.projectValue(dest);
    }

    // Evaluate the expression directly into the destination.
    AggValueSlot slot = AggValueSlot::forLValue(dest,
                                        AggValueSlot::IsNotDestructed,
                                        AggValueSlot::DoesNotNeedGCBarriers,
                                        AggValueSlot::IsNotAliased,
                                        Zeroed ? AggValueSlot::IsZeroed :
                                                 AggValueSlot::IsNotZeroed);

    EmitAggExpr(init, slot);
    return;
  }
  }
  llvm_unreachable("bad evaluation kind");
}