//===--- CGAtomic.cpp - Emit LLVM IR for atomic operations ----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the code for emitting atomic operations.
//
//===----------------------------------------------------------------------===//

#include "CodeGenFunction.h"
#include "CGCall.h"
#include "CodeGenModule.h"
#include "clang/AST/ASTContext.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Operator.h"

using namespace clang;
using namespace CodeGen;

namespace {
  class AtomicInfo {
    CodeGenFunction &CGF;
    QualType AtomicTy;
    QualType ValueTy;
    uint64_t AtomicSizeInBits;
    uint64_t ValueSizeInBits;
    CharUnits AtomicAlign;
    CharUnits ValueAlign;
    CharUnits LValueAlign;
    TypeEvaluationKind EvaluationKind;
    bool UseLibcall;
  public:
    AtomicInfo(CodeGenFunction &CGF, LValue &lvalue) : CGF(CGF) {
      assert(lvalue.isSimple());

      AtomicTy = lvalue.getType();
      ValueTy = AtomicTy->castAs<AtomicType>()->getValueType();
      EvaluationKind = CGF.getEvaluationKind(ValueTy);

      ASTContext &C = CGF.getContext();

      uint64_t valueAlignInBits;
      std::tie(ValueSizeInBits, valueAlignInBits) = C.getTypeInfo(ValueTy);

      uint64_t atomicAlignInBits;
      std::tie(AtomicSizeInBits, atomicAlignInBits) = C.getTypeInfo(AtomicTy);

      assert(ValueSizeInBits <= AtomicSizeInBits);
      assert(valueAlignInBits <= atomicAlignInBits);

      AtomicAlign = C.toCharUnitsFromBits(atomicAlignInBits);
      ValueAlign = C.toCharUnitsFromBits(valueAlignInBits);
      if (lvalue.getAlignment().isZero())
        lvalue.setAlignment(AtomicAlign);

      UseLibcall =
          (AtomicSizeInBits > uint64_t(C.toBits(lvalue.getAlignment())) ||
           AtomicSizeInBits > C.getTargetInfo().getMaxAtomicInlineWidth());
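      // Illustrative example (not a guarantee; thresholds are
      // target-dependent): on a target whose maximum inline atomic width is
      // 64 bits, a 16-byte _Atomic aggregate ends up with UseLibcall == true,
      // so its accesses are lowered to __atomic_* library calls instead of
      // native atomic instructions.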
    }

    QualType getAtomicType() const { return AtomicTy; }
    QualType getValueType() const { return ValueTy; }
    CharUnits getAtomicAlignment() const { return AtomicAlign; }
    CharUnits getValueAlignment() const { return ValueAlign; }
    uint64_t getAtomicSizeInBits() const { return AtomicSizeInBits; }
    uint64_t getValueSizeInBits() const { return ValueSizeInBits; }
    TypeEvaluationKind getEvaluationKind() const { return EvaluationKind; }
    bool shouldUseLibcall() const { return UseLibcall; }

    /// Is the atomic size larger than the underlying value type?
    ///
    /// Note that the absence of padding does not mean that atomic
    /// objects are completely interchangeable with non-atomic
    /// objects: we might have promoted the alignment of a type
    /// without making it bigger.
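    /// As an illustration (sizes are target-dependent): if a 3-byte struct
    /// wrapped in _Atomic is widened to a 4-byte, 4-aligned object,
    /// ValueSizeInBits is 24 while AtomicSizeInBits is 32, so hasPadding()
    /// returns true.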
    bool hasPadding() const {
      return (ValueSizeInBits != AtomicSizeInBits);
    }

    bool emitMemSetZeroIfNecessary(LValue dest) const;

    llvm::Value *getAtomicSizeValue() const {
      CharUnits size = CGF.getContext().toCharUnitsFromBits(AtomicSizeInBits);
      return CGF.CGM.getSize(size);
    }

    /// Cast the given pointer to an integer pointer suitable for
    /// atomic operations.
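    /// For example, the storage of an _Atomic(float) is reinterpreted as an
    /// i32* (in the pointer's original address space) so that it can feed the
    /// integer-based atomic load/store/cmpxchg paths below.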
    llvm::Value *emitCastToAtomicIntPointer(llvm::Value *addr) const;

    /// Turn an atomic-layout object into an r-value.
    RValue convertTempToRValue(llvm::Value *addr,
                               AggValueSlot resultSlot,
                               SourceLocation loc) const;

    /// Copy an atomic r-value into atomic-layout memory.
    void emitCopyIntoMemory(RValue rvalue, LValue lvalue) const;

    /// Project an l-value down to the value field.
    LValue projectValue(LValue lvalue) const {
      llvm::Value *addr = lvalue.getAddress();
      if (hasPadding())
        addr = CGF.Builder.CreateStructGEP(addr, 0);

      return LValue::MakeAddr(addr, getValueType(), lvalue.getAlignment(),
                              CGF.getContext(), lvalue.getTBAAInfo());
    }

    /// Materialize an atomic r-value in atomic-layout memory.
    llvm::Value *materializeRValue(RValue rvalue) const;

  private:
    bool requiresMemSetZero(llvm::Type *type) const;
  };
}

static RValue emitAtomicLibcall(CodeGenFunction &CGF,
                                StringRef fnName,
                                QualType resultType,
                                CallArgList &args) {
  const CGFunctionInfo &fnInfo =
      CGF.CGM.getTypes().arrangeFreeFunctionCall(resultType, args,
          FunctionType::ExtInfo(), RequiredArgs::All);
  llvm::FunctionType *fnTy = CGF.CGM.getTypes().GetFunctionType(fnInfo);
  llvm::Constant *fn = CGF.CGM.CreateRuntimeFunction(fnTy, fnName);
  return CGF.EmitCall(fnInfo, fn, ReturnValueSlot(), args);
}

/// Does a store of the given IR type modify the full expected width?
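/// Example (assuming an x86-64-style data layout): i32 checked against an
/// expected width of 32 bits is full-size, whereas x86_fp80 checked against
/// the 128 bits a long double occupies is not, since its store size is only
/// 80 bits.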
static bool isFullSizeType(CodeGenModule &CGM, llvm::Type *type,
                           uint64_t expectedSize) {
  return (CGM.getDataLayout().getTypeStoreSize(type) * 8 == expectedSize);
}

/// Does the atomic type require memsetting to zero before initialization?
///
/// The IR type is provided as a way of making certain queries faster.
bool AtomicInfo::requiresMemSetZero(llvm::Type *type) const {
  // If the atomic type has size padding, we definitely need a memset.
  if (hasPadding()) return true;

  // Otherwise, do some simple heuristics to try to avoid it:
  switch (getEvaluationKind()) {
  // For scalars and complexes, check whether the store size of the
  // type uses the full size.
  case TEK_Scalar:
    return !isFullSizeType(CGF.CGM, type, AtomicSizeInBits);
  case TEK_Complex:
    return !isFullSizeType(CGF.CGM, type->getStructElementType(0),
                           AtomicSizeInBits / 2);

  // Padding in structs has an undefined bit pattern. User beware.
  case TEK_Aggregate:
    return false;
  }
  llvm_unreachable("bad evaluation kind");
}

bool AtomicInfo::emitMemSetZeroIfNecessary(LValue dest) const {
  llvm::Value *addr = dest.getAddress();
  if (!requiresMemSetZero(addr->getType()->getPointerElementType()))
    return false;

  CGF.Builder.CreateMemSet(addr, llvm::ConstantInt::get(CGF.Int8Ty, 0),
                           AtomicSizeInBits / 8,
                           dest.getAlignment().getQuantity());
  return true;
}

static void emitAtomicCmpXchg(CodeGenFunction &CGF, AtomicExpr *E,
                              llvm::Value *Dest, llvm::Value *Ptr,
                              llvm::Value *Val1, llvm::Value *Val2,
                              uint64_t Size, unsigned Align,
                              llvm::AtomicOrdering SuccessOrder,
                              llvm::AtomicOrdering FailureOrder) {
  // Note that we always emit a strong cmpxchg here; weak cmpxchg is not
  // supported, at least at the moment.
  llvm::LoadInst *Expected = CGF.Builder.CreateLoad(Val1);
  Expected->setAlignment(Align);
  llvm::LoadInst *Desired = CGF.Builder.CreateLoad(Val2);
  Desired->setAlignment(Align);

  llvm::AtomicCmpXchgInst *Pair = CGF.Builder.CreateAtomicCmpXchg(
      Ptr, Expected, Desired, SuccessOrder, FailureOrder);
  Pair->setVolatile(E->isVolatile());

  // Cmp holds the result of the compare-exchange operation: true on success,
  // false on failure.
  llvm::Value *Old = CGF.Builder.CreateExtractValue(Pair, 0);
  llvm::Value *Cmp = CGF.Builder.CreateExtractValue(Pair, 1);

  // This basic block is used to hold the store instruction if the operation
  // failed.
  llvm::BasicBlock *StoreExpectedBB =
      CGF.createBasicBlock("cmpxchg.store_expected", CGF.CurFn);

  // This basic block is the exit point of the operation, we should end up
  // here regardless of whether or not the operation succeeded.
  llvm::BasicBlock *ContinueBB =
      CGF.createBasicBlock("cmpxchg.continue", CGF.CurFn);

  // Update Expected if Expected isn't equal to Old, otherwise branch to the
  // exit point.
  CGF.Builder.CreateCondBr(Cmp, ContinueBB, StoreExpectedBB);

  CGF.Builder.SetInsertPoint(StoreExpectedBB);
  // Update the memory at Expected with Old's value.
  llvm::StoreInst *StoreExpected = CGF.Builder.CreateStore(Old, Val1);
  StoreExpected->setAlignment(Align);
  // Finally, branch to the exit point.
  CGF.Builder.CreateBr(ContinueBB);

  CGF.Builder.SetInsertPoint(ContinueBB);
  // Update the memory at Dest with Cmp's value.
  CGF.EmitStoreOfScalar(Cmp, CGF.MakeAddrLValue(Dest, E->getType()));
  return;
}

/// Given an ordering required on success, emit all possible cmpxchg
/// instructions to cope with the provided (but possibly only dynamically
/// known) FailureOrder.
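/// For instance (illustrative), a compare-exchange whose success ordering is
/// acq_rel but whose failure ordering is only known at run time is lowered to
/// a switch over the failure argument, selecting between cmpxchg instructions
/// with monotonic and acquire failure orderings.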
static void emitAtomicCmpXchgFailureSet(CodeGenFunction &CGF, AtomicExpr *E,
                                        llvm::Value *Dest, llvm::Value *Ptr,
                                        llvm::Value *Val1, llvm::Value *Val2,
                                        llvm::Value *FailureOrderVal,
                                        uint64_t Size, unsigned Align,
                                        llvm::AtomicOrdering SuccessOrder) {
  llvm::AtomicOrdering FailureOrder;
  if (llvm::ConstantInt *FO = dyn_cast<llvm::ConstantInt>(FailureOrderVal)) {
    switch (FO->getSExtValue()) {
    default:
      FailureOrder = llvm::Monotonic;
      break;
    case AtomicExpr::AO_ABI_memory_order_consume:
    case AtomicExpr::AO_ABI_memory_order_acquire:
      FailureOrder = llvm::Acquire;
      break;
    case AtomicExpr::AO_ABI_memory_order_seq_cst:
      FailureOrder = llvm::SequentiallyConsistent;
      break;
    }
    if (FailureOrder >= SuccessOrder) {
      // Don't assert on undefined behaviour.
      FailureOrder =
          llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(SuccessOrder);
    }
    emitAtomicCmpXchg(CGF, E, Dest, Ptr, Val1, Val2, Size, Align, SuccessOrder,
                      FailureOrder);
    return;
  }

  // Create all the relevant BB's
  llvm::BasicBlock *MonotonicBB = nullptr, *AcquireBB = nullptr,
                   *SeqCstBB = nullptr;
  MonotonicBB = CGF.createBasicBlock("monotonic_fail", CGF.CurFn);
  if (SuccessOrder != llvm::Monotonic && SuccessOrder != llvm::Release)
    AcquireBB = CGF.createBasicBlock("acquire_fail", CGF.CurFn);
  if (SuccessOrder == llvm::SequentiallyConsistent)
    SeqCstBB = CGF.createBasicBlock("seqcst_fail", CGF.CurFn);

  llvm::BasicBlock *ContBB = CGF.createBasicBlock("atomic.continue", CGF.CurFn);

  llvm::SwitchInst *SI = CGF.Builder.CreateSwitch(FailureOrderVal, MonotonicBB);

  // Emit all the different atomics

  // MonotonicBB is arbitrarily chosen as the default case; in practice, this
  // doesn't matter unless someone is crazy enough to use something that
  // doesn't fold to a constant for the ordering.
  CGF.Builder.SetInsertPoint(MonotonicBB);
  emitAtomicCmpXchg(CGF, E, Dest, Ptr, Val1, Val2,
                    Size, Align, SuccessOrder, llvm::Monotonic);
  CGF.Builder.CreateBr(ContBB);

  if (AcquireBB) {
    CGF.Builder.SetInsertPoint(AcquireBB);
    emitAtomicCmpXchg(CGF, E, Dest, Ptr, Val1, Val2,
                      Size, Align, SuccessOrder, llvm::Acquire);
    CGF.Builder.CreateBr(ContBB);
    SI->addCase(CGF.Builder.getInt32(AtomicExpr::AO_ABI_memory_order_consume),
                AcquireBB);
    SI->addCase(CGF.Builder.getInt32(AtomicExpr::AO_ABI_memory_order_acquire),
                AcquireBB);
  }
  if (SeqCstBB) {
    CGF.Builder.SetInsertPoint(SeqCstBB);
    emitAtomicCmpXchg(CGF, E, Dest, Ptr, Val1, Val2,
                      Size, Align, SuccessOrder, llvm::SequentiallyConsistent);
    CGF.Builder.CreateBr(ContBB);
    SI->addCase(CGF.Builder.getInt32(AtomicExpr::AO_ABI_memory_order_seq_cst),
                SeqCstBB);
  }

  CGF.Builder.SetInsertPoint(ContBB);
}

static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, llvm::Value *Dest,
                         llvm::Value *Ptr, llvm::Value *Val1, llvm::Value *Val2,
                         llvm::Value *FailureOrder, uint64_t Size,
                         unsigned Align, llvm::AtomicOrdering Order) {
  llvm::AtomicRMWInst::BinOp Op = llvm::AtomicRMWInst::Add;
  llvm::Instruction::BinaryOps PostOp = (llvm::Instruction::BinaryOps)0;

  switch (E->getOp()) {
  case AtomicExpr::AO__c11_atomic_init:
    llvm_unreachable("Already handled!");

  case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
  case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
  case AtomicExpr::AO__atomic_compare_exchange:
  case AtomicExpr::AO__atomic_compare_exchange_n:
    emitAtomicCmpXchgFailureSet(CGF, E, Dest, Ptr, Val1, Val2, FailureOrder,
                                Size, Align, Order);
    return;
  case AtomicExpr::AO__c11_atomic_load:
  case AtomicExpr::AO__atomic_load_n:
  case AtomicExpr::AO__atomic_load: {
    llvm::LoadInst *Load = CGF.Builder.CreateLoad(Ptr);
    Load->setAtomic(Order);
    Load->setAlignment(Size);
    Load->setVolatile(E->isVolatile());
    llvm::StoreInst *StoreDest = CGF.Builder.CreateStore(Load, Dest);
    StoreDest->setAlignment(Align);
    return;
  }

  case AtomicExpr::AO__c11_atomic_store:
  case AtomicExpr::AO__atomic_store:
  case AtomicExpr::AO__atomic_store_n: {
    assert(!Dest && "Store does not return a value");
    llvm::LoadInst *LoadVal1 = CGF.Builder.CreateLoad(Val1);
    LoadVal1->setAlignment(Align);
    llvm::StoreInst *Store = CGF.Builder.CreateStore(LoadVal1, Ptr);
    Store->setAtomic(Order);
    Store->setAlignment(Size);
    Store->setVolatile(E->isVolatile());
    return;
  }

  case AtomicExpr::AO__c11_atomic_exchange:
  case AtomicExpr::AO__atomic_exchange_n:
  case AtomicExpr::AO__atomic_exchange:
    Op = llvm::AtomicRMWInst::Xchg;
    break;

  case AtomicExpr::AO__atomic_add_fetch:
    PostOp = llvm::Instruction::Add;
    // Fall through.
  case AtomicExpr::AO__c11_atomic_fetch_add:
  case AtomicExpr::AO__atomic_fetch_add:
    Op = llvm::AtomicRMWInst::Add;
    break;

  case AtomicExpr::AO__atomic_sub_fetch:
    PostOp = llvm::Instruction::Sub;
    // Fall through.
  case AtomicExpr::AO__c11_atomic_fetch_sub:
  case AtomicExpr::AO__atomic_fetch_sub:
    Op = llvm::AtomicRMWInst::Sub;
    break;

  case AtomicExpr::AO__atomic_and_fetch:
    PostOp = llvm::Instruction::And;
    // Fall through.
  case AtomicExpr::AO__c11_atomic_fetch_and:
  case AtomicExpr::AO__atomic_fetch_and:
    Op = llvm::AtomicRMWInst::And;
    break;

  case AtomicExpr::AO__atomic_or_fetch:
    PostOp = llvm::Instruction::Or;
    // Fall through.
  case AtomicExpr::AO__c11_atomic_fetch_or:
  case AtomicExpr::AO__atomic_fetch_or:
    Op = llvm::AtomicRMWInst::Or;
    break;

  case AtomicExpr::AO__atomic_xor_fetch:
    PostOp = llvm::Instruction::Xor;
    // Fall through.
  case AtomicExpr::AO__c11_atomic_fetch_xor:
  case AtomicExpr::AO__atomic_fetch_xor:
    Op = llvm::AtomicRMWInst::Xor;
    break;

  case AtomicExpr::AO__atomic_nand_fetch:
    PostOp = llvm::Instruction::And;
    // Fall through.
  case AtomicExpr::AO__atomic_fetch_nand:
    Op = llvm::AtomicRMWInst::Nand;
    break;
  }

  llvm::LoadInst *LoadVal1 = CGF.Builder.CreateLoad(Val1);
  LoadVal1->setAlignment(Align);
  llvm::AtomicRMWInst *RMWI =
      CGF.Builder.CreateAtomicRMW(Op, Ptr, LoadVal1, Order);
  RMWI->setVolatile(E->isVolatile());

  // For __atomic_*_fetch operations, perform the operation again to
  // determine the value which was written.
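  // E.g. __atomic_add_fetch lowers to an atomicrmw add (which yields the old
  // value) followed by an ordinary add to recompute old + val, which is the
  // result the *_fetch form must produce; __atomic_fetch_add just returns the
  // old value directly.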
  llvm::Value *Result = RMWI;
  if (PostOp)
    Result = CGF.Builder.CreateBinOp(PostOp, RMWI, LoadVal1);
  if (E->getOp() == AtomicExpr::AO__atomic_nand_fetch)
    Result = CGF.Builder.CreateNot(Result);
  llvm::StoreInst *StoreDest = CGF.Builder.CreateStore(Result, Dest);
  StoreDest->setAlignment(Align);
}

// This function emits any expression (scalar, complex, or aggregate)
// into a temporary alloca.
static llvm::Value *
EmitValToTemp(CodeGenFunction &CGF, Expr *E) {
  llvm::Value *DeclPtr = CGF.CreateMemTemp(E->getType(), ".atomictmp");
  CGF.EmitAnyExprToMem(E, DeclPtr, E->getType().getQualifiers(),
                       /*Init*/ true);
  return DeclPtr;
}

static void
AddDirectArgument(CodeGenFunction &CGF, CallArgList &Args,
                  bool UseOptimizedLibcall, llvm::Value *Val, QualType ValTy,
                  SourceLocation Loc) {
  if (UseOptimizedLibcall) {
    // Load value and pass it to the function directly.
    unsigned Align = CGF.getContext().getTypeAlignInChars(ValTy).getQuantity();
    Val = CGF.EmitLoadOfScalar(Val, false, Align, ValTy, Loc);
    Args.add(RValue::get(Val), ValTy);
  } else {
    // Non-optimized functions always take a reference.
    Args.add(RValue::get(CGF.EmitCastToVoidPtr(Val)),
             CGF.getContext().VoidPtrTy);
  }
}

RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) {
  QualType AtomicTy = E->getPtr()->getType()->getPointeeType();
  QualType MemTy = AtomicTy;
  if (const AtomicType *AT = AtomicTy->getAs<AtomicType>())
    MemTy = AT->getValueType();
  CharUnits sizeChars = getContext().getTypeSizeInChars(AtomicTy);
  uint64_t Size = sizeChars.getQuantity();
  CharUnits alignChars = getContext().getTypeAlignInChars(AtomicTy);
  unsigned Align = alignChars.getQuantity();
  unsigned MaxInlineWidthInBits =
      getTarget().getMaxAtomicInlineWidth();
  bool UseLibcall = (Size != Align ||
                     getContext().toBits(sizeChars) > MaxInlineWidthInBits);

  llvm::Value *OrderFail = nullptr, *Val1 = nullptr, *Val2 = nullptr;
  llvm::Value *Ptr = EmitScalarExpr(E->getPtr());

  if (E->getOp() == AtomicExpr::AO__c11_atomic_init) {
    assert(!Dest && "Init does not return a value");
    LValue lvalue = LValue::MakeAddr(Ptr, AtomicTy, alignChars, getContext());
    EmitAtomicInit(E->getVal1(), lvalue);
    return RValue::get(nullptr);
  }

  llvm::Value *Order = EmitScalarExpr(E->getOrder());

  switch (E->getOp()) {
  case AtomicExpr::AO__c11_atomic_init:
    llvm_unreachable("Already handled!");

  case AtomicExpr::AO__c11_atomic_load:
  case AtomicExpr::AO__atomic_load_n:
    break;

  case AtomicExpr::AO__atomic_load:
    Dest = EmitScalarExpr(E->getVal1());
    break;

  case AtomicExpr::AO__atomic_store:
    Val1 = EmitScalarExpr(E->getVal1());
    break;

  case AtomicExpr::AO__atomic_exchange:
    Val1 = EmitScalarExpr(E->getVal1());
    Dest = EmitScalarExpr(E->getVal2());
    break;

  case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
  case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
  case AtomicExpr::AO__atomic_compare_exchange_n:
  case AtomicExpr::AO__atomic_compare_exchange:
    Val1 = EmitScalarExpr(E->getVal1());
    if (E->getOp() == AtomicExpr::AO__atomic_compare_exchange)
      Val2 = EmitScalarExpr(E->getVal2());
    else
      Val2 = EmitValToTemp(*this, E->getVal2());
    OrderFail = EmitScalarExpr(E->getOrderFail());
    // Evaluate and discard the 'weak' argument.
    if (E->getNumSubExprs() == 6)
      EmitScalarExpr(E->getWeak());
    break;

  case AtomicExpr::AO__c11_atomic_fetch_add:
  case AtomicExpr::AO__c11_atomic_fetch_sub:
    if (MemTy->isPointerType()) {
      // For pointer arithmetic, we're required to do a bit of math:
      // adding 1 to an int* is not the same as adding 1 to a uintptr_t.
      // ... but only for the C11 builtins. The GNU builtins expect the
      // user to multiply by sizeof(T).
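      // Illustrative example: with _Atomic(int *) ap,
      // __c11_atomic_fetch_add(&ap, 1, ...) advances the stored pointer by
      // sizeof(int) bytes, whereas the GNU __atomic_fetch_add applied to a
      // pointer adds exactly the byte count that was passed in.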
      QualType Val1Ty = E->getVal1()->getType();
      llvm::Value *Val1Scalar = EmitScalarExpr(E->getVal1());
      CharUnits PointeeIncAmt =
          getContext().getTypeSizeInChars(MemTy->getPointeeType());
      Val1Scalar = Builder.CreateMul(Val1Scalar, CGM.getSize(PointeeIncAmt));
      Val1 = CreateMemTemp(Val1Ty, ".atomictmp");
      EmitStoreOfScalar(Val1Scalar, MakeAddrLValue(Val1, Val1Ty));
      break;
    }
    // Fall through.
  case AtomicExpr::AO__atomic_fetch_add:
  case AtomicExpr::AO__atomic_fetch_sub:
  case AtomicExpr::AO__atomic_add_fetch:
  case AtomicExpr::AO__atomic_sub_fetch:
  case AtomicExpr::AO__c11_atomic_store:
  case AtomicExpr::AO__c11_atomic_exchange:
  case AtomicExpr::AO__atomic_store_n:
  case AtomicExpr::AO__atomic_exchange_n:
  case AtomicExpr::AO__c11_atomic_fetch_and:
  case AtomicExpr::AO__c11_atomic_fetch_or:
  case AtomicExpr::AO__c11_atomic_fetch_xor:
  case AtomicExpr::AO__atomic_fetch_and:
  case AtomicExpr::AO__atomic_fetch_or:
  case AtomicExpr::AO__atomic_fetch_xor:
  case AtomicExpr::AO__atomic_fetch_nand:
  case AtomicExpr::AO__atomic_and_fetch:
  case AtomicExpr::AO__atomic_or_fetch:
  case AtomicExpr::AO__atomic_xor_fetch:
  case AtomicExpr::AO__atomic_nand_fetch:
    Val1 = EmitValToTemp(*this, E->getVal1());
    break;
  }

  if (!E->getType()->isVoidType() && !Dest)
    Dest = CreateMemTemp(E->getType(), ".atomicdst");

  // Use a library call. See: http://gcc.gnu.org/wiki/Atomic/GCCMM/LIbrary .
  if (UseLibcall) {
    bool UseOptimizedLibcall = false;
    switch (E->getOp()) {
    case AtomicExpr::AO__c11_atomic_fetch_add:
    case AtomicExpr::AO__atomic_fetch_add:
    case AtomicExpr::AO__c11_atomic_fetch_and:
    case AtomicExpr::AO__atomic_fetch_and:
    case AtomicExpr::AO__c11_atomic_fetch_or:
    case AtomicExpr::AO__atomic_fetch_or:
    case AtomicExpr::AO__c11_atomic_fetch_sub:
    case AtomicExpr::AO__atomic_fetch_sub:
    case AtomicExpr::AO__c11_atomic_fetch_xor:
    case AtomicExpr::AO__atomic_fetch_xor:
      // For these, only library calls for certain sizes exist.
      UseOptimizedLibcall = true;
      break;
    default:
      // Only use optimized library calls for sizes for which they exist.
      if (Size == 1 || Size == 2 || Size == 4 || Size == 8)
        UseOptimizedLibcall = true;
      break;
    }

    CallArgList Args;
    if (!UseOptimizedLibcall) {
      // For non-optimized library calls, the size is the first parameter
      Args.add(RValue::get(llvm::ConstantInt::get(SizeTy, Size)),
               getContext().getSizeType());
    }
    // Atomic address is the first or second parameter
    Args.add(RValue::get(EmitCastToVoidPtr(Ptr)), getContext().VoidPtrTy);

    std::string LibCallName;
    QualType LoweredMemTy =
        MemTy->isPointerType() ? getContext().getIntPtrType() : MemTy;
    QualType RetTy;
    bool HaveRetTy = false;
    switch (E->getOp()) {
    // There is only one libcall for compare and exchange, because there is no
    // optimisation benefit possible from a libcall version of a weak compare
    // and exchange.
    // bool __atomic_compare_exchange(size_t size, void *mem, void *expected,
    //                                void *desired, int success, int failure)
    // bool __atomic_compare_exchange_N(T *mem, T *expected, T desired,
    //                                  int success, int failure)
    case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
    case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
    case AtomicExpr::AO__atomic_compare_exchange:
    case AtomicExpr::AO__atomic_compare_exchange_n:
      LibCallName = "__atomic_compare_exchange";
      RetTy = getContext().BoolTy;
      HaveRetTy = true;
      Args.add(RValue::get(EmitCastToVoidPtr(Val1)), getContext().VoidPtrTy);
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val2, MemTy,
                        E->getExprLoc());
      Args.add(RValue::get(Order), getContext().IntTy);
      Order = OrderFail;
      break;
    // void __atomic_exchange(size_t size, void *mem, void *val, void *return,
    //                        int order)
    // T __atomic_exchange_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_exchange:
    case AtomicExpr::AO__atomic_exchange_n:
    case AtomicExpr::AO__atomic_exchange:
      LibCallName = "__atomic_exchange";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
                        E->getExprLoc());
      break;
    // void __atomic_store(size_t size, void *mem, void *val, int order)
    // void __atomic_store_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_store:
    case AtomicExpr::AO__atomic_store:
    case AtomicExpr::AO__atomic_store_n:
      LibCallName = "__atomic_store";
      RetTy = getContext().VoidTy;
      HaveRetTy = true;
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
                        E->getExprLoc());
      break;
    // void __atomic_load(size_t size, void *mem, void *return, int order)
    // T __atomic_load_N(T *mem, int order)
    case AtomicExpr::AO__c11_atomic_load:
    case AtomicExpr::AO__atomic_load:
    case AtomicExpr::AO__atomic_load_n:
      LibCallName = "__atomic_load";
      break;
    // T __atomic_fetch_add_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_fetch_add:
    case AtomicExpr::AO__atomic_fetch_add:
      LibCallName = "__atomic_fetch_add";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, LoweredMemTy,
                        E->getExprLoc());
      break;
    // T __atomic_fetch_and_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_fetch_and:
    case AtomicExpr::AO__atomic_fetch_and:
      LibCallName = "__atomic_fetch_and";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
                        E->getExprLoc());
      break;
    // T __atomic_fetch_or_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_fetch_or:
    case AtomicExpr::AO__atomic_fetch_or:
      LibCallName = "__atomic_fetch_or";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
                        E->getExprLoc());
      break;
    // T __atomic_fetch_sub_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_fetch_sub:
    case AtomicExpr::AO__atomic_fetch_sub:
      LibCallName = "__atomic_fetch_sub";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, LoweredMemTy,
                        E->getExprLoc());
      break;
    // T __atomic_fetch_xor_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_fetch_xor:
    case AtomicExpr::AO__atomic_fetch_xor:
      LibCallName = "__atomic_fetch_xor";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
                        E->getExprLoc());
      break;
    default: return EmitUnsupportedRValue(E, "atomic library call");
    }

    // Optimized functions have the size in their name.
    if (UseOptimizedLibcall)
      LibCallName += "_" + llvm::utostr(Size);
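    // For example, an 8-byte atomic fetch-add is emitted as a call to
    // __atomic_fetch_add_8; operations outside the optimized set keep the
    // generic form, with the size passed as the leading size_t argument.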
    // By default, assume we return a value of the atomic type.
    if (!HaveRetTy) {
      if (UseOptimizedLibcall) {
        // Value is returned directly.
        RetTy = MemTy;
      } else {
        // Value is returned through parameter before the order.
        RetTy = getContext().VoidTy;
        Args.add(RValue::get(EmitCastToVoidPtr(Dest)),
                 getContext().VoidPtrTy);
      }
    }
    // order is always the last parameter
    Args.add(RValue::get(Order),
             getContext().IntTy);

    const CGFunctionInfo &FuncInfo =
        CGM.getTypes().arrangeFreeFunctionCall(RetTy, Args,
            FunctionType::ExtInfo(), RequiredArgs::All);
    llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FuncInfo);
    llvm::Constant *Func = CGM.CreateRuntimeFunction(FTy, LibCallName);
    RValue Res = EmitCall(FuncInfo, Func, ReturnValueSlot(), Args);
    if (!RetTy->isVoidType())
      return Res;
    if (E->getType()->isVoidType())
      return RValue::get(nullptr);
    return convertTempToRValue(Dest, E->getType(), E->getExprLoc());
  }

  bool IsStore = E->getOp() == AtomicExpr::AO__c11_atomic_store ||
                 E->getOp() == AtomicExpr::AO__atomic_store ||
                 E->getOp() == AtomicExpr::AO__atomic_store_n;
  bool IsLoad = E->getOp() == AtomicExpr::AO__c11_atomic_load ||
                E->getOp() == AtomicExpr::AO__atomic_load ||
                E->getOp() == AtomicExpr::AO__atomic_load_n;

  llvm::Type *IPtrTy =
      llvm::IntegerType::get(getLLVMContext(), Size * 8)->getPointerTo();
  llvm::Value *OrigDest = Dest;
  Ptr = Builder.CreateBitCast(Ptr, IPtrTy);
  if (Val1) Val1 = Builder.CreateBitCast(Val1, IPtrTy);
  if (Val2) Val2 = Builder.CreateBitCast(Val2, IPtrTy);
  if (Dest && !E->isCmpXChg()) Dest = Builder.CreateBitCast(Dest, IPtrTy);

  if (isa<llvm::ConstantInt>(Order)) {
    int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
    switch (ord) {
    case AtomicExpr::AO_ABI_memory_order_relaxed:
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, OrderFail,
                   Size, Align, llvm::Monotonic);
      break;
    case AtomicExpr::AO_ABI_memory_order_consume:
    case AtomicExpr::AO_ABI_memory_order_acquire:
      if (IsStore)
        break; // Avoid crashing on code with undefined behavior
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, OrderFail,
                   Size, Align, llvm::Acquire);
      break;
    case AtomicExpr::AO_ABI_memory_order_release:
      if (IsLoad)
        break; // Avoid crashing on code with undefined behavior
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, OrderFail,
                   Size, Align, llvm::Release);
      break;
    case AtomicExpr::AO_ABI_memory_order_acq_rel:
      if (IsLoad || IsStore)
        break; // Avoid crashing on code with undefined behavior
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, OrderFail,
                   Size, Align, llvm::AcquireRelease);
      break;
    case AtomicExpr::AO_ABI_memory_order_seq_cst:
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, OrderFail,
                   Size, Align, llvm::SequentiallyConsistent);
      break;
    default: // invalid order
      // We should not ever get here normally, but it's hard to
      // enforce that in general.
      break;
    }
    if (E->getType()->isVoidType())
      return RValue::get(nullptr);
    return convertTempToRValue(OrigDest, E->getType(), E->getExprLoc());
  }

  // Long case, when Order isn't obviously constant.

  // Create all the relevant BB's
  llvm::BasicBlock *MonotonicBB = nullptr, *AcquireBB = nullptr,
                   *ReleaseBB = nullptr, *AcqRelBB = nullptr,
                   *SeqCstBB = nullptr;
  MonotonicBB = createBasicBlock("monotonic", CurFn);
  if (!IsStore)
    AcquireBB = createBasicBlock("acquire", CurFn);
  if (!IsLoad)
    ReleaseBB = createBasicBlock("release", CurFn);
  if (!IsLoad && !IsStore)
    AcqRelBB = createBasicBlock("acqrel", CurFn);
  SeqCstBB = createBasicBlock("seqcst", CurFn);
  llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);

  // Create the switch for the split
  // MonotonicBB is arbitrarily chosen as the default case; in practice, this
  // doesn't matter unless someone is crazy enough to use something that
  // doesn't fold to a constant for the ordering.
  Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
  llvm::SwitchInst *SI = Builder.CreateSwitch(Order, MonotonicBB);

  // Emit all the different atomics
  Builder.SetInsertPoint(MonotonicBB);
  EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, OrderFail,
               Size, Align, llvm::Monotonic);
  Builder.CreateBr(ContBB);
  if (!IsStore) {
    Builder.SetInsertPoint(AcquireBB);
    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, OrderFail,
                 Size, Align, llvm::Acquire);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32(AtomicExpr::AO_ABI_memory_order_consume),
                AcquireBB);
    SI->addCase(Builder.getInt32(AtomicExpr::AO_ABI_memory_order_acquire),
                AcquireBB);
  }
  if (!IsLoad) {
    Builder.SetInsertPoint(ReleaseBB);
    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, OrderFail,
                 Size, Align, llvm::Release);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32(AtomicExpr::AO_ABI_memory_order_release),
                ReleaseBB);
  }
  if (!IsLoad && !IsStore) {
    Builder.SetInsertPoint(AcqRelBB);
    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, OrderFail,
                 Size, Align, llvm::AcquireRelease);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32(AtomicExpr::AO_ABI_memory_order_acq_rel),
                AcqRelBB);
  }
  Builder.SetInsertPoint(SeqCstBB);
  EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, OrderFail,
               Size, Align, llvm::SequentiallyConsistent);
  Builder.CreateBr(ContBB);
  SI->addCase(Builder.getInt32(AtomicExpr::AO_ABI_memory_order_seq_cst),
              SeqCstBB);

  // Cleanup and return
  Builder.SetInsertPoint(ContBB);
  if (E->getType()->isVoidType())
    return RValue::get(nullptr);
  return convertTempToRValue(OrigDest, E->getType(), E->getExprLoc());
}

llvm::Value *AtomicInfo::emitCastToAtomicIntPointer(llvm::Value *addr) const {
  unsigned addrspace =
      cast<llvm::PointerType>(addr->getType())->getAddressSpace();
  llvm::IntegerType *ty =
      llvm::IntegerType::get(CGF.getLLVMContext(), AtomicSizeInBits);
  return CGF.Builder.CreateBitCast(addr, ty->getPointerTo(addrspace));
}

RValue AtomicInfo::convertTempToRValue(llvm::Value *addr,
                                       AggValueSlot resultSlot,
                                       SourceLocation loc) const {
  if (EvaluationKind == TEK_Aggregate)
    return resultSlot.asRValue();

  // Drill into the padding structure if we have one.
  if (hasPadding())
    addr = CGF.Builder.CreateStructGEP(addr, 0);

  // Otherwise, just convert the temporary to an r-value using the
  // normal conversion routine.
  return CGF.convertTempToRValue(addr, getValueType(), loc);
}

/// Emit a load from an l-value of atomic type. Note that the r-value
/// we produce is an r-value of the atomic *value* type.
RValue CodeGenFunction::EmitAtomicLoad(LValue src, SourceLocation loc,
                                       AggValueSlot resultSlot) {
  AtomicInfo atomics(*this, src);

  // Check whether we should use a library call.
  if (atomics.shouldUseLibcall()) {
    llvm::Value *tempAddr;
    if (!resultSlot.isIgnored()) {
      assert(atomics.getEvaluationKind() == TEK_Aggregate);
      tempAddr = resultSlot.getAddr();
    } else {
      tempAddr = CreateMemTemp(atomics.getAtomicType(), "atomic-load-temp");
    }

    // void __atomic_load(size_t size, void *mem, void *return, int order);
    CallArgList args;
    args.add(RValue::get(atomics.getAtomicSizeValue()),
             getContext().getSizeType());
    args.add(RValue::get(EmitCastToVoidPtr(src.getAddress())),
             getContext().VoidPtrTy);
    args.add(RValue::get(EmitCastToVoidPtr(tempAddr)),
             getContext().VoidPtrTy);
    args.add(RValue::get(llvm::ConstantInt::get(
                 IntTy, AtomicExpr::AO_ABI_memory_order_seq_cst)),
             getContext().IntTy);
    emitAtomicLibcall(*this, "__atomic_load", getContext().VoidTy, args);

    // Produce the r-value.
    return atomics.convertTempToRValue(tempAddr, resultSlot, loc);
  }

  // Okay, we're doing this natively.
  llvm::Value *addr = atomics.emitCastToAtomicIntPointer(src.getAddress());
  llvm::LoadInst *load = Builder.CreateLoad(addr, "atomic-load");
  load->setAtomic(llvm::SequentiallyConsistent);

  // Other decoration.
  load->setAlignment(src.getAlignment().getQuantity());
  if (src.isVolatileQualified())
    load->setVolatile(true);
  if (src.getTBAAInfo())
    CGM.DecorateInstruction(load, src.getTBAAInfo());

  // Okay, turn that back into the original value type.
  QualType valueType = atomics.getValueType();
  llvm::Value *result = load;

  // If we're ignoring an aggregate return, don't do anything.
  if (atomics.getEvaluationKind() == TEK_Aggregate && resultSlot.isIgnored())
    return RValue::getAggregate(nullptr, false);

  // The easiest way to do this is to go through memory, but we
  // try not to in some easy cases.
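  // E.g. a float loaded as an i32 is simply bitcast back to float in
  // registers, while an aggregate or padded value takes the memory path
  // below: store the integer to a temporary, then reload it with its
  // natural type.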
  if (atomics.getEvaluationKind() == TEK_Scalar && !atomics.hasPadding()) {
    llvm::Type *resultTy = CGM.getTypes().ConvertTypeForMem(valueType);
    if (isa<llvm::IntegerType>(resultTy)) {
      assert(result->getType() == resultTy);
      result = EmitFromMemory(result, valueType);
    } else if (isa<llvm::PointerType>(resultTy)) {
      result = Builder.CreateIntToPtr(result, resultTy);
    } else {
      result = Builder.CreateBitCast(result, resultTy);
    }
    return RValue::get(result);
  }

  // Create a temporary. This needs to be big enough to hold the
  // atomic integer.
  llvm::Value *temp;
  bool tempIsVolatile = false;
  CharUnits tempAlignment;
  if (atomics.getEvaluationKind() == TEK_Aggregate) {
    assert(!resultSlot.isIgnored());
    temp = resultSlot.getAddr();
    tempAlignment = atomics.getValueAlignment();
    tempIsVolatile = resultSlot.isVolatile();
  } else {
    temp = CreateMemTemp(atomics.getAtomicType(), "atomic-load-temp");
    tempAlignment = atomics.getAtomicAlignment();
  }

  // Slam the integer into the temporary.
  llvm::Value *castTemp = atomics.emitCastToAtomicIntPointer(temp);
  Builder.CreateAlignedStore(result, castTemp, tempAlignment.getQuantity())
      ->setVolatile(tempIsVolatile);

  return atomics.convertTempToRValue(temp, resultSlot, loc);
}


/// Copy an r-value into memory as part of storing to an atomic type.
/// This needs to create a bit-pattern suitable for atomic operations.
void AtomicInfo::emitCopyIntoMemory(RValue rvalue, LValue dest) const {
  // If we have an r-value, the rvalue should be of the atomic type,
  // which means that the caller is responsible for having zeroed
  // any padding. Just do an aggregate copy of that type.
  if (rvalue.isAggregate()) {
    CGF.EmitAggregateCopy(dest.getAddress(),
                          rvalue.getAggregateAddr(),
                          getAtomicType(),
                          (rvalue.isVolatileQualified()
                           || dest.isVolatileQualified()),
                          dest.getAlignment());
    return;
  }

  // Okay, otherwise we're copying stuff.

  // Zero out the buffer if necessary.
  emitMemSetZeroIfNecessary(dest);

  // Drill past the padding if present.
  dest = projectValue(dest);

  // Okay, store the rvalue in.
  if (rvalue.isScalar()) {
    CGF.EmitStoreOfScalar(rvalue.getScalarVal(), dest, /*init*/ true);
  } else {
    CGF.EmitStoreOfComplex(rvalue.getComplexVal(), dest, /*init*/ true);
  }
}


/// Materialize an r-value into memory for the purposes of storing it
/// to an atomic type.
llvm::Value *AtomicInfo::materializeRValue(RValue rvalue) const {
  // Aggregate r-values are already in memory, and EmitAtomicStore
  // requires them to be values of the atomic type.
  if (rvalue.isAggregate())
    return rvalue.getAggregateAddr();

  // Otherwise, make a temporary and materialize into it.
  llvm::Value *temp = CGF.CreateMemTemp(getAtomicType(), "atomic-store-temp");
  LValue tempLV = CGF.MakeAddrLValue(temp, getAtomicType(), getAtomicAlignment());
  emitCopyIntoMemory(rvalue, tempLV);
  return temp;
}

/// Emit a store to an l-value of atomic type.
///
/// Note that the r-value is expected to be an r-value *of the atomic
/// type*; this means that for aggregate r-values, it should include
/// storage for any padding that was necessary.
void CodeGenFunction::EmitAtomicStore(RValue rvalue, LValue dest, bool isInit) {
  // If this is an aggregate r-value, it should agree in type except
  // maybe for address-space qualification.
  assert(!rvalue.isAggregate() ||
         rvalue.getAggregateAddr()->getType()->getPointerElementType()
           == dest.getAddress()->getType()->getPointerElementType());

  AtomicInfo atomics(*this, dest);

  // If this is an initialization, just put the value there normally.
  if (isInit) {
    atomics.emitCopyIntoMemory(rvalue, dest);
    return;
  }

  // Check whether we should use a library call.
  if (atomics.shouldUseLibcall()) {
    // Produce a source address.
    llvm::Value *srcAddr = atomics.materializeRValue(rvalue);

    // void __atomic_store(size_t size, void *mem, void *val, int order)
    CallArgList args;
    args.add(RValue::get(atomics.getAtomicSizeValue()),
             getContext().getSizeType());
    args.add(RValue::get(EmitCastToVoidPtr(dest.getAddress())),
             getContext().VoidPtrTy);
    args.add(RValue::get(EmitCastToVoidPtr(srcAddr)),
             getContext().VoidPtrTy);
    args.add(RValue::get(llvm::ConstantInt::get(
                 IntTy, AtomicExpr::AO_ABI_memory_order_seq_cst)),
             getContext().IntTy);
    emitAtomicLibcall(*this, "__atomic_store", getContext().VoidTy, args);
    return;
  }

  // Okay, we're doing this natively.
  llvm::Value *intValue;

  // If we've got a scalar value of the right size, try to avoid going
  // through memory.
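  // E.g. a pointer-typed value is converted with ptrtoint to the atomic
  // integer width and stored directly, avoiding a round trip through a
  // stack temporary.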
  if (rvalue.isScalar() && !atomics.hasPadding()) {
    llvm::Value *value = rvalue.getScalarVal();
    if (isa<llvm::IntegerType>(value->getType())) {
      intValue = value;
    } else {
      llvm::IntegerType *inputIntTy =
        llvm::IntegerType::get(getLLVMContext(), atomics.getValueSizeInBits());
      if (isa<llvm::PointerType>(value->getType())) {
        intValue = Builder.CreatePtrToInt(value, inputIntTy);
      } else {
        intValue = Builder.CreateBitCast(value, inputIntTy);
      }
    }

  // Otherwise, we need to go through memory.
  } else {
    // Put the r-value in memory.
    llvm::Value *addr = atomics.materializeRValue(rvalue);

    // Cast the temporary to the atomic int type and pull a value out.
    addr = atomics.emitCastToAtomicIntPointer(addr);
    intValue = Builder.CreateAlignedLoad(addr,
                                 atomics.getAtomicAlignment().getQuantity());
  }

  // Do the atomic store.
  llvm::Value *addr = atomics.emitCastToAtomicIntPointer(dest.getAddress());
  llvm::StoreInst *store = Builder.CreateStore(intValue, addr);

  // Initializations don't need to be atomic.
  if (!isInit) store->setAtomic(llvm::SequentiallyConsistent);

  // Other decoration.
  store->setAlignment(dest.getAlignment().getQuantity());
  if (dest.isVolatileQualified())
    store->setVolatile(true);
  if (dest.getTBAAInfo())
    CGM.DecorateInstruction(store, dest.getTBAAInfo());
}

void CodeGenFunction::EmitAtomicInit(Expr *init, LValue dest) {
  AtomicInfo atomics(*this, dest);

  switch (atomics.getEvaluationKind()) {
  case TEK_Scalar: {
    llvm::Value *value = EmitScalarExpr(init);
    atomics.emitCopyIntoMemory(RValue::get(value), dest);
    return;
  }

  case TEK_Complex: {
    ComplexPairTy value = EmitComplexExpr(init);
    atomics.emitCopyIntoMemory(RValue::getComplex(value), dest);
    return;
  }

  case TEK_Aggregate: {
    // Fix up the destination if the initializer isn't an expression
    // of atomic type.
    bool Zeroed = false;
    if (!init->getType()->isAtomicType()) {
      Zeroed = atomics.emitMemSetZeroIfNecessary(dest);
      dest = atomics.projectValue(dest);
    }

    // Evaluate the expression directly into the destination.
    AggValueSlot slot = AggValueSlot::forLValue(dest,
                                        AggValueSlot::IsNotDestructed,
                                        AggValueSlot::DoesNotNeedGCBarriers,
                                        AggValueSlot::IsNotAliased,
                                        Zeroed ? AggValueSlot::IsZeroed :
                                                 AggValueSlot::IsNotZeroed);

    EmitAggExpr(init, slot);
    return;
  }
  }
  llvm_unreachable("bad evaluation kind");
}