//===--- CGAtomic.cpp - Emit LLVM IR for atomic operations ----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the code for emitting atomic operations.
//
//===----------------------------------------------------------------------===//

#include "CodeGenFunction.h"
#include "CGCall.h"
#include "CodeGenModule.h"
#include "clang/AST/ASTContext.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Operator.h"

using namespace clang;
using namespace CodeGen;

namespace {
  class AtomicInfo {
    CodeGenFunction &CGF;
    QualType AtomicTy;
    QualType ValueTy;
    uint64_t AtomicSizeInBits;
    uint64_t ValueSizeInBits;
    CharUnits AtomicAlign;
    CharUnits ValueAlign;
    CharUnits LValueAlign;
    TypeEvaluationKind EvaluationKind;
    bool UseLibcall;
  public:
    AtomicInfo(CodeGenFunction &CGF, LValue &lvalue) : CGF(CGF) {
      assert(lvalue.isSimple());

      AtomicTy = lvalue.getType();
      ValueTy = AtomicTy->castAs<AtomicType>()->getValueType();
      EvaluationKind = CGF.getEvaluationKind(ValueTy);

      ASTContext &C = CGF.getContext();

      uint64_t valueAlignInBits;
      std::tie(ValueSizeInBits, valueAlignInBits) = C.getTypeInfo(ValueTy);

      uint64_t atomicAlignInBits;
      std::tie(AtomicSizeInBits, atomicAlignInBits) = C.getTypeInfo(AtomicTy);

      assert(ValueSizeInBits <= AtomicSizeInBits);
      assert(valueAlignInBits <= atomicAlignInBits);

      AtomicAlign = C.toCharUnitsFromBits(atomicAlignInBits);
      ValueAlign = C.toCharUnitsFromBits(valueAlignInBits);
      if (lvalue.getAlignment().isZero())
        lvalue.setAlignment(AtomicAlign);

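      // Fall back to the atomic library calls when the object might not be
      // aligned enough for a lock-free access or is wider than the target
      // can handle inline.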
      UseLibcall =
        (AtomicSizeInBits > uint64_t(C.toBits(lvalue.getAlignment())) ||
         AtomicSizeInBits > C.getTargetInfo().getMaxAtomicInlineWidth());
    }

    QualType getAtomicType() const { return AtomicTy; }
    QualType getValueType() const { return ValueTy; }
    CharUnits getAtomicAlignment() const { return AtomicAlign; }
    CharUnits getValueAlignment() const { return ValueAlign; }
    uint64_t getAtomicSizeInBits() const { return AtomicSizeInBits; }
    uint64_t getValueSizeInBits() const { return ValueSizeInBits; }
    TypeEvaluationKind getEvaluationKind() const { return EvaluationKind; }
    bool shouldUseLibcall() const { return UseLibcall; }

    /// Is the atomic size larger than the underlying value type?
    ///
    /// Note that the absence of padding does not mean that atomic
    /// objects are completely interchangeable with non-atomic
    /// objects: we might have promoted the alignment of a type
    /// without making it bigger.
    bool hasPadding() const {
      return (ValueSizeInBits != AtomicSizeInBits);
    }

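    /// Zero-initialize the whole atomic object if its in-memory
    /// representation requires it; returns true if a memset was emitted.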
    bool emitMemSetZeroIfNecessary(LValue dest) const;

    llvm::Value *getAtomicSizeValue() const {
      CharUnits size = CGF.getContext().toCharUnitsFromBits(AtomicSizeInBits);
      return CGF.CGM.getSize(size);
    }

    /// Cast the given pointer to an integer pointer suitable for
    /// atomic operations.
    llvm::Value *emitCastToAtomicIntPointer(llvm::Value *addr) const;

    /// Turn an atomic-layout object into an r-value.
    RValue convertTempToRValue(llvm::Value *addr,
                               AggValueSlot resultSlot,
                               SourceLocation loc) const;

    /// Copy an atomic r-value into atomic-layout memory.
    void emitCopyIntoMemory(RValue rvalue, LValue lvalue) const;

    /// Project an l-value down to the value field.
    LValue projectValue(LValue lvalue) const {
      llvm::Value *addr = lvalue.getAddress();
      if (hasPadding())
        addr = CGF.Builder.CreateStructGEP(addr, 0);

      return LValue::MakeAddr(addr, getValueType(), lvalue.getAlignment(),
                              CGF.getContext(), lvalue.getTBAAInfo());
    }

    /// Materialize an atomic r-value in atomic-layout memory.
    llvm::Value *materializeRValue(RValue rvalue) const;

  private:
    bool requiresMemSetZero(llvm::Type *type) const;
  };
}

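/// Emit a call to a function from the atomic runtime support library
/// (e.g. __atomic_load / __atomic_store) with the given result type and
/// argument list.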
static RValue emitAtomicLibcall(CodeGenFunction &CGF,
                                StringRef fnName,
                                QualType resultType,
                                CallArgList &args) {
  const CGFunctionInfo &fnInfo =
    CGF.CGM.getTypes().arrangeFreeFunctionCall(resultType, args,
            FunctionType::ExtInfo(), RequiredArgs::All);
  llvm::FunctionType *fnTy = CGF.CGM.getTypes().GetFunctionType(fnInfo);
  llvm::Constant *fn = CGF.CGM.CreateRuntimeFunction(fnTy, fnName);
  return CGF.EmitCall(fnInfo, fn, ReturnValueSlot(), args);
}

/// Does a store of the given IR type modify the full expected width?
static bool isFullSizeType(CodeGenModule &CGM, llvm::Type *type,
                           uint64_t expectedSize) {
  return (CGM.getDataLayout().getTypeStoreSize(type) * 8 == expectedSize);
}

/// Does the atomic type require memsetting to zero before initialization?
///
/// The IR type is provided as a way of making certain queries faster.
bool AtomicInfo::requiresMemSetZero(llvm::Type *type) const {
  // If the atomic type has size padding, we definitely need a memset.
  if (hasPadding()) return true;

  // Otherwise, do some simple heuristics to try to avoid it:
  switch (getEvaluationKind()) {
  // For scalars and complexes, check whether the store size of the
  // type uses the full size.
  case TEK_Scalar:
    return !isFullSizeType(CGF.CGM, type, AtomicSizeInBits);
  case TEK_Complex:
    return !isFullSizeType(CGF.CGM, type->getStructElementType(0),
                           AtomicSizeInBits / 2);

  // Padding in structs has an undefined bit pattern.  User beware.
  case TEK_Aggregate:
    return false;
  }
  llvm_unreachable("bad evaluation kind");
}

bool AtomicInfo::emitMemSetZeroIfNecessary(LValue dest) const {
  llvm::Value *addr = dest.getAddress();
  if (!requiresMemSetZero(addr->getType()->getPointerElementType()))
    return false;

  CGF.Builder.CreateMemSet(addr, llvm::ConstantInt::get(CGF.Int8Ty, 0),
                           AtomicSizeInBits / 8,
                           dest.getAlignment().getQuantity());
  return true;
}

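/// Emit a single cmpxchg for the given success/failure orderings: load the
/// expected and desired values from their temporaries, issue the instruction,
/// write the observed value back to Val1 on failure, and store the success
/// flag into Dest.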
static void emitAtomicCmpXchg(CodeGenFunction &CGF, AtomicExpr *E, bool IsWeak,
                              llvm::Value *Dest, llvm::Value *Ptr,
                              llvm::Value *Val1, llvm::Value *Val2,
                              uint64_t Size, unsigned Align,
                              llvm::AtomicOrdering SuccessOrder,
                              llvm::AtomicOrdering FailureOrder) {
  // IsWeak selects between the weak and strong forms of the cmpxchg.
  llvm::LoadInst *Expected = CGF.Builder.CreateLoad(Val1);
  Expected->setAlignment(Align);
  llvm::LoadInst *Desired = CGF.Builder.CreateLoad(Val2);
  Desired->setAlignment(Align);

  llvm::AtomicCmpXchgInst *Pair = CGF.Builder.CreateAtomicCmpXchg(
      Ptr, Expected, Desired, SuccessOrder, FailureOrder);
  Pair->setVolatile(E->isVolatile());
  Pair->setWeak(IsWeak);

  // Cmp holds the result of the compare-exchange operation: true on success,
  // false on failure.
  llvm::Value *Old = CGF.Builder.CreateExtractValue(Pair, 0);
  llvm::Value *Cmp = CGF.Builder.CreateExtractValue(Pair, 1);

  // This basic block is used to hold the store instruction if the operation
  // failed.
  llvm::BasicBlock *StoreExpectedBB =
      CGF.createBasicBlock("cmpxchg.store_expected", CGF.CurFn);

  // This basic block is the exit point of the operation; we should end up
  // here regardless of whether or not the operation succeeded.
  llvm::BasicBlock *ContinueBB =
      CGF.createBasicBlock("cmpxchg.continue", CGF.CurFn);

  // Update Expected if Expected isn't equal to Old, otherwise branch to the
  // exit point.
  CGF.Builder.CreateCondBr(Cmp, ContinueBB, StoreExpectedBB);

  CGF.Builder.SetInsertPoint(StoreExpectedBB);
  // Update the memory at Expected with Old's value.
  llvm::StoreInst *StoreExpected = CGF.Builder.CreateStore(Old, Val1);
  StoreExpected->setAlignment(Align);
  // Finally, branch to the exit point.
  CGF.Builder.CreateBr(ContinueBB);

  CGF.Builder.SetInsertPoint(ContinueBB);
  // Update the memory at Dest with Cmp's value.
  CGF.EmitStoreOfScalar(Cmp, CGF.MakeAddrLValue(Dest, E->getType()));
  return;
}

/// Given an ordering required on success, emit all possible cmpxchg
/// instructions to cope with the provided (but possibly only dynamically
/// known) FailureOrder.
static void emitAtomicCmpXchgFailureSet(CodeGenFunction &CGF, AtomicExpr *E,
                                        bool IsWeak, llvm::Value *Dest,
                                        llvm::Value *Ptr, llvm::Value *Val1,
                                        llvm::Value *Val2,
                                        llvm::Value *FailureOrderVal,
                                        uint64_t Size, unsigned Align,
                                        llvm::AtomicOrdering SuccessOrder) {
  llvm::AtomicOrdering FailureOrder;
  if (llvm::ConstantInt *FO = dyn_cast<llvm::ConstantInt>(FailureOrderVal)) {
    switch (FO->getSExtValue()) {
    default:
      FailureOrder = llvm::Monotonic;
      break;
    case AtomicExpr::AO_ABI_memory_order_consume:
    case AtomicExpr::AO_ABI_memory_order_acquire:
      FailureOrder = llvm::Acquire;
      break;
    case AtomicExpr::AO_ABI_memory_order_seq_cst:
      FailureOrder = llvm::SequentiallyConsistent;
      break;
    }
    if (FailureOrder >= SuccessOrder) {
      // Don't assert on undefined behaviour.
      FailureOrder =
          llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(SuccessOrder);
    }
    emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, Size, Align,
                      SuccessOrder, FailureOrder);
    return;
  }

  // Create all the relevant BB's
  llvm::BasicBlock *MonotonicBB = nullptr, *AcquireBB = nullptr,
                   *SeqCstBB = nullptr;
  MonotonicBB = CGF.createBasicBlock("monotonic_fail", CGF.CurFn);
  if (SuccessOrder != llvm::Monotonic && SuccessOrder != llvm::Release)
    AcquireBB = CGF.createBasicBlock("acquire_fail", CGF.CurFn);
  if (SuccessOrder == llvm::SequentiallyConsistent)
    SeqCstBB = CGF.createBasicBlock("seqcst_fail", CGF.CurFn);

  llvm::BasicBlock *ContBB = CGF.createBasicBlock("atomic.continue", CGF.CurFn);

  llvm::SwitchInst *SI = CGF.Builder.CreateSwitch(FailureOrderVal, MonotonicBB);

  // Emit all the different atomics

  // MonotonicBB is arbitrarily chosen as the default case; in practice, this
  // doesn't matter unless someone is crazy enough to use something that
  // doesn't fold to a constant for the ordering.
  CGF.Builder.SetInsertPoint(MonotonicBB);
  emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2,
                    Size, Align, SuccessOrder, llvm::Monotonic);
  CGF.Builder.CreateBr(ContBB);

  if (AcquireBB) {
    CGF.Builder.SetInsertPoint(AcquireBB);
    emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2,
                      Size, Align, SuccessOrder, llvm::Acquire);
    CGF.Builder.CreateBr(ContBB);
    SI->addCase(CGF.Builder.getInt32(AtomicExpr::AO_ABI_memory_order_consume),
                AcquireBB);
    SI->addCase(CGF.Builder.getInt32(AtomicExpr::AO_ABI_memory_order_acquire),
                AcquireBB);
  }
  if (SeqCstBB) {
    CGF.Builder.SetInsertPoint(SeqCstBB);
    emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2,
                      Size, Align, SuccessOrder, llvm::SequentiallyConsistent);
    CGF.Builder.CreateBr(ContBB);
    SI->addCase(CGF.Builder.getInt32(AtomicExpr::AO_ABI_memory_order_seq_cst),
                SeqCstBB);
  }

  CGF.Builder.SetInsertPoint(ContBB);
}

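/// Emit one atomic builtin at a fixed memory ordering: compare-exchange forms
/// go through emitAtomicCmpXchgFailureSet, loads and stores become atomic
/// load/store instructions, and everything else becomes an atomicrmw, with
/// the __atomic_*_fetch forms recomputing the post-operation value afterwards.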
static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, llvm::Value *Dest,
                         llvm::Value *Ptr, llvm::Value *Val1, llvm::Value *Val2,
                         llvm::Value *IsWeak, llvm::Value *FailureOrder,
                         uint64_t Size, unsigned Align,
                         llvm::AtomicOrdering Order) {
  llvm::AtomicRMWInst::BinOp Op = llvm::AtomicRMWInst::Add;
  llvm::Instruction::BinaryOps PostOp = (llvm::Instruction::BinaryOps)0;

  switch (E->getOp()) {
  case AtomicExpr::AO__c11_atomic_init:
    llvm_unreachable("Already handled!");

  case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
    emitAtomicCmpXchgFailureSet(CGF, E, false, Dest, Ptr, Val1, Val2,
                                FailureOrder, Size, Align, Order);
    return;
  case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
    emitAtomicCmpXchgFailureSet(CGF, E, true, Dest, Ptr, Val1, Val2,
                                FailureOrder, Size, Align, Order);
    return;
  case AtomicExpr::AO__atomic_compare_exchange:
  case AtomicExpr::AO__atomic_compare_exchange_n: {
    if (llvm::ConstantInt *IsWeakC = dyn_cast<llvm::ConstantInt>(IsWeak)) {
      emitAtomicCmpXchgFailureSet(CGF, E, IsWeakC->getZExtValue(), Dest, Ptr,
                                  Val1, Val2, FailureOrder, Size, Align, Order);
    } else {
      // Create all the relevant BB's
      llvm::BasicBlock *StrongBB =
          CGF.createBasicBlock("cmpxchg.strong", CGF.CurFn);
      llvm::BasicBlock *WeakBB = CGF.createBasicBlock("cmpxchg.weak", CGF.CurFn);
      llvm::BasicBlock *ContBB =
          CGF.createBasicBlock("cmpxchg.continue", CGF.CurFn);

      llvm::SwitchInst *SI = CGF.Builder.CreateSwitch(IsWeak, WeakBB);
      SI->addCase(CGF.Builder.getInt1(false), StrongBB);

      CGF.Builder.SetInsertPoint(StrongBB);
      emitAtomicCmpXchgFailureSet(CGF, E, false, Dest, Ptr, Val1, Val2,
                                  FailureOrder, Size, Align, Order);
      CGF.Builder.CreateBr(ContBB);

      CGF.Builder.SetInsertPoint(WeakBB);
      emitAtomicCmpXchgFailureSet(CGF, E, true, Dest, Ptr, Val1, Val2,
                                  FailureOrder, Size, Align, Order);
      CGF.Builder.CreateBr(ContBB);

      CGF.Builder.SetInsertPoint(ContBB);
    }
    return;
  }
  case AtomicExpr::AO__c11_atomic_load:
  case AtomicExpr::AO__atomic_load_n:
  case AtomicExpr::AO__atomic_load: {
    llvm::LoadInst *Load = CGF.Builder.CreateLoad(Ptr);
    Load->setAtomic(Order);
    Load->setAlignment(Size);
    Load->setVolatile(E->isVolatile());
    llvm::StoreInst *StoreDest = CGF.Builder.CreateStore(Load, Dest);
    StoreDest->setAlignment(Align);
    return;
  }

  case AtomicExpr::AO__c11_atomic_store:
  case AtomicExpr::AO__atomic_store:
  case AtomicExpr::AO__atomic_store_n: {
    assert(!Dest && "Store does not return a value");
    llvm::LoadInst *LoadVal1 = CGF.Builder.CreateLoad(Val1);
    LoadVal1->setAlignment(Align);
    llvm::StoreInst *Store = CGF.Builder.CreateStore(LoadVal1, Ptr);
    Store->setAtomic(Order);
    Store->setAlignment(Size);
    Store->setVolatile(E->isVolatile());
    return;
  }

  case AtomicExpr::AO__c11_atomic_exchange:
  case AtomicExpr::AO__atomic_exchange_n:
  case AtomicExpr::AO__atomic_exchange:
    Op = llvm::AtomicRMWInst::Xchg;
    break;

  case AtomicExpr::AO__atomic_add_fetch:
    PostOp = llvm::Instruction::Add;
    // Fall through.
  case AtomicExpr::AO__c11_atomic_fetch_add:
  case AtomicExpr::AO__atomic_fetch_add:
    Op = llvm::AtomicRMWInst::Add;
    break;

  case AtomicExpr::AO__atomic_sub_fetch:
    PostOp = llvm::Instruction::Sub;
    // Fall through.
  case AtomicExpr::AO__c11_atomic_fetch_sub:
  case AtomicExpr::AO__atomic_fetch_sub:
    Op = llvm::AtomicRMWInst::Sub;
    break;

  case AtomicExpr::AO__atomic_and_fetch:
    PostOp = llvm::Instruction::And;
    // Fall through.
  case AtomicExpr::AO__c11_atomic_fetch_and:
  case AtomicExpr::AO__atomic_fetch_and:
    Op = llvm::AtomicRMWInst::And;
    break;

  case AtomicExpr::AO__atomic_or_fetch:
    PostOp = llvm::Instruction::Or;
    // Fall through.
  case AtomicExpr::AO__c11_atomic_fetch_or:
  case AtomicExpr::AO__atomic_fetch_or:
    Op = llvm::AtomicRMWInst::Or;
    break;

  case AtomicExpr::AO__atomic_xor_fetch:
    PostOp = llvm::Instruction::Xor;
    // Fall through.
  case AtomicExpr::AO__c11_atomic_fetch_xor:
  case AtomicExpr::AO__atomic_fetch_xor:
    Op = llvm::AtomicRMWInst::Xor;
    break;

  case AtomicExpr::AO__atomic_nand_fetch:
    PostOp = llvm::Instruction::And;
    // Fall through.
  case AtomicExpr::AO__atomic_fetch_nand:
    Op = llvm::AtomicRMWInst::Nand;
    break;
  }

  llvm::LoadInst *LoadVal1 = CGF.Builder.CreateLoad(Val1);
  LoadVal1->setAlignment(Align);
  llvm::AtomicRMWInst *RMWI =
      CGF.Builder.CreateAtomicRMW(Op, Ptr, LoadVal1, Order);
  RMWI->setVolatile(E->isVolatile());

  // For __atomic_*_fetch operations, perform the operation again to
  // determine the value which was written.
  llvm::Value *Result = RMWI;
  if (PostOp)
    Result = CGF.Builder.CreateBinOp(PostOp, RMWI, LoadVal1);
  if (E->getOp() == AtomicExpr::AO__atomic_nand_fetch)
    Result = CGF.Builder.CreateNot(Result);
  llvm::StoreInst *StoreDest = CGF.Builder.CreateStore(Result, Dest);
  StoreDest->setAlignment(Align);
}

// This function emits any expression (scalar, complex, or aggregate)
// into a temporary alloca.
static llvm::Value *
EmitValToTemp(CodeGenFunction &CGF, Expr *E) {
  llvm::Value *DeclPtr = CGF.CreateMemTemp(E->getType(), ".atomictmp");
  CGF.EmitAnyExprToMem(E, DeclPtr, E->getType().getQualifiers(),
                       /*Init*/ true);
  return DeclPtr;
}

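/// Add the value operand for an atomic libcall: the optimized (sized)
/// libcalls take the value directly, while the generic versions take a
/// pointer to it.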
static void
AddDirectArgument(CodeGenFunction &CGF, CallArgList &Args,
                  bool UseOptimizedLibcall, llvm::Value *Val, QualType ValTy,
                  SourceLocation Loc) {
  if (UseOptimizedLibcall) {
    // Load value and pass it to the function directly.
    unsigned Align = CGF.getContext().getTypeAlignInChars(ValTy).getQuantity();
    Val = CGF.EmitLoadOfScalar(Val, false, Align, ValTy, Loc);
    Args.add(RValue::get(Val), ValTy);
  } else {
    // Non-optimized functions always take a reference.
    Args.add(RValue::get(CGF.EmitCastToVoidPtr(Val)),
             CGF.getContext().VoidPtrTy);
  }
}

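/// Emit an atomic expression, either by lowering it to a call into the
/// atomic runtime library or by emitting the native atomic instructions for
/// every memory ordering the expression might dynamically use.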
RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) {
  QualType AtomicTy = E->getPtr()->getType()->getPointeeType();
  QualType MemTy = AtomicTy;
  if (const AtomicType *AT = AtomicTy->getAs<AtomicType>())
    MemTy = AT->getValueType();
  CharUnits sizeChars = getContext().getTypeSizeInChars(AtomicTy);
  uint64_t Size = sizeChars.getQuantity();
  CharUnits alignChars = getContext().getTypeAlignInChars(AtomicTy);
  unsigned Align = alignChars.getQuantity();
  unsigned MaxInlineWidthInBits =
      getTarget().getMaxAtomicInlineWidth();
  bool UseLibcall = (Size != Align ||
                     getContext().toBits(sizeChars) > MaxInlineWidthInBits);

  llvm::Value *IsWeak = nullptr, *OrderFail = nullptr, *Val1 = nullptr,
              *Val2 = nullptr;
  llvm::Value *Ptr = EmitScalarExpr(E->getPtr());

  if (E->getOp() == AtomicExpr::AO__c11_atomic_init) {
    assert(!Dest && "Init does not return a value");
    LValue lvalue = LValue::MakeAddr(Ptr, AtomicTy, alignChars, getContext());
    EmitAtomicInit(E->getVal1(), lvalue);
    return RValue::get(nullptr);
  }

  llvm::Value *Order = EmitScalarExpr(E->getOrder());

  switch (E->getOp()) {
  case AtomicExpr::AO__c11_atomic_init:
    llvm_unreachable("Already handled!");

  case AtomicExpr::AO__c11_atomic_load:
  case AtomicExpr::AO__atomic_load_n:
    break;

  case AtomicExpr::AO__atomic_load:
    Dest = EmitScalarExpr(E->getVal1());
    break;

  case AtomicExpr::AO__atomic_store:
    Val1 = EmitScalarExpr(E->getVal1());
    break;

  case AtomicExpr::AO__atomic_exchange:
    Val1 = EmitScalarExpr(E->getVal1());
    Dest = EmitScalarExpr(E->getVal2());
    break;

  case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
  case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
  case AtomicExpr::AO__atomic_compare_exchange_n:
  case AtomicExpr::AO__atomic_compare_exchange:
    Val1 = EmitScalarExpr(E->getVal1());
    if (E->getOp() == AtomicExpr::AO__atomic_compare_exchange)
      Val2 = EmitScalarExpr(E->getVal2());
    else
      Val2 = EmitValToTemp(*this, E->getVal2());
    OrderFail = EmitScalarExpr(E->getOrderFail());
    if (E->getNumSubExprs() == 6)
      IsWeak = EmitScalarExpr(E->getWeak());
    break;

  case AtomicExpr::AO__c11_atomic_fetch_add:
  case AtomicExpr::AO__c11_atomic_fetch_sub:
    if (MemTy->isPointerType()) {
      // For pointer arithmetic, we're required to do a bit of math:
      // adding 1 to an int* is not the same as adding 1 to a uintptr_t.
      // ... but only for the C11 builtins. The GNU builtins expect the
      // user to multiply by sizeof(T).
      QualType Val1Ty = E->getVal1()->getType();
      llvm::Value *Val1Scalar = EmitScalarExpr(E->getVal1());
      CharUnits PointeeIncAmt =
          getContext().getTypeSizeInChars(MemTy->getPointeeType());
      Val1Scalar = Builder.CreateMul(Val1Scalar, CGM.getSize(PointeeIncAmt));
      Val1 = CreateMemTemp(Val1Ty, ".atomictmp");
      EmitStoreOfScalar(Val1Scalar, MakeAddrLValue(Val1, Val1Ty));
      break;
    }
    // Fall through.
  case AtomicExpr::AO__atomic_fetch_add:
  case AtomicExpr::AO__atomic_fetch_sub:
  case AtomicExpr::AO__atomic_add_fetch:
  case AtomicExpr::AO__atomic_sub_fetch:
  case AtomicExpr::AO__c11_atomic_store:
  case AtomicExpr::AO__c11_atomic_exchange:
  case AtomicExpr::AO__atomic_store_n:
  case AtomicExpr::AO__atomic_exchange_n:
  case AtomicExpr::AO__c11_atomic_fetch_and:
  case AtomicExpr::AO__c11_atomic_fetch_or:
  case AtomicExpr::AO__c11_atomic_fetch_xor:
  case AtomicExpr::AO__atomic_fetch_and:
  case AtomicExpr::AO__atomic_fetch_or:
  case AtomicExpr::AO__atomic_fetch_xor:
  case AtomicExpr::AO__atomic_fetch_nand:
  case AtomicExpr::AO__atomic_and_fetch:
  case AtomicExpr::AO__atomic_or_fetch:
  case AtomicExpr::AO__atomic_xor_fetch:
  case AtomicExpr::AO__atomic_nand_fetch:
    Val1 = EmitValToTemp(*this, E->getVal1());
    break;
  }

  if (!E->getType()->isVoidType() && !Dest)
    Dest = CreateMemTemp(E->getType(), ".atomicdst");

  // Use a library call.  See: http://gcc.gnu.org/wiki/Atomic/GCCMM/LIbrary .
  if (UseLibcall) {
    bool UseOptimizedLibcall = false;
    switch (E->getOp()) {
    case AtomicExpr::AO__c11_atomic_fetch_add:
    case AtomicExpr::AO__atomic_fetch_add:
    case AtomicExpr::AO__c11_atomic_fetch_and:
    case AtomicExpr::AO__atomic_fetch_and:
    case AtomicExpr::AO__c11_atomic_fetch_or:
    case AtomicExpr::AO__atomic_fetch_or:
    case AtomicExpr::AO__c11_atomic_fetch_sub:
    case AtomicExpr::AO__atomic_fetch_sub:
    case AtomicExpr::AO__c11_atomic_fetch_xor:
    case AtomicExpr::AO__atomic_fetch_xor:
      // For these, only library calls for certain sizes exist.
      UseOptimizedLibcall = true;
      break;
    default:
      // Only use optimized library calls for sizes for which they exist.
      if (Size == 1 || Size == 2 || Size == 4 || Size == 8)
        UseOptimizedLibcall = true;
      break;
    }

    CallArgList Args;
    if (!UseOptimizedLibcall) {
      // For non-optimized library calls, the size is the first parameter
      Args.add(RValue::get(llvm::ConstantInt::get(SizeTy, Size)),
               getContext().getSizeType());
    }
    // Atomic address is the first or second parameter
    Args.add(RValue::get(EmitCastToVoidPtr(Ptr)), getContext().VoidPtrTy);

    std::string LibCallName;
    QualType LoweredMemTy =
      MemTy->isPointerType() ? getContext().getIntPtrType() : MemTy;
    QualType RetTy;
    bool HaveRetTy = false;
    switch (E->getOp()) {
    // There is only one libcall for compare and exchange, because there is no
    // optimisation benefit possible from a libcall version of a weak compare
    // and exchange.
    // bool __atomic_compare_exchange(size_t size, void *mem, void *expected,
    //                                void *desired, int success, int failure)
    // bool __atomic_compare_exchange_N(T *mem, T *expected, T desired,
    //                                  int success, int failure)
    case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
    case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
    case AtomicExpr::AO__atomic_compare_exchange:
    case AtomicExpr::AO__atomic_compare_exchange_n:
      LibCallName = "__atomic_compare_exchange";
      RetTy = getContext().BoolTy;
      HaveRetTy = true;
      Args.add(RValue::get(EmitCastToVoidPtr(Val1)), getContext().VoidPtrTy);
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val2, MemTy,
                        E->getExprLoc());
      Args.add(RValue::get(Order), getContext().IntTy);
      Order = OrderFail;
      break;
    // void __atomic_exchange(size_t size, void *mem, void *val, void *return,
    //                        int order)
    // T __atomic_exchange_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_exchange:
    case AtomicExpr::AO__atomic_exchange_n:
    case AtomicExpr::AO__atomic_exchange:
      LibCallName = "__atomic_exchange";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
                        E->getExprLoc());
      break;
    // void __atomic_store(size_t size, void *mem, void *val, int order)
    // void __atomic_store_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_store:
    case AtomicExpr::AO__atomic_store:
    case AtomicExpr::AO__atomic_store_n:
      LibCallName = "__atomic_store";
      RetTy = getContext().VoidTy;
      HaveRetTy = true;
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
                        E->getExprLoc());
      break;
    // void __atomic_load(size_t size, void *mem, void *return, int order)
    // T __atomic_load_N(T *mem, int order)
    case AtomicExpr::AO__c11_atomic_load:
    case AtomicExpr::AO__atomic_load:
    case AtomicExpr::AO__atomic_load_n:
      LibCallName = "__atomic_load";
      break;
    // T __atomic_fetch_add_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_fetch_add:
    case AtomicExpr::AO__atomic_fetch_add:
      LibCallName = "__atomic_fetch_add";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, LoweredMemTy,
                        E->getExprLoc());
      break;
    // T __atomic_fetch_and_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_fetch_and:
    case AtomicExpr::AO__atomic_fetch_and:
      LibCallName = "__atomic_fetch_and";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
                        E->getExprLoc());
      break;
    // T __atomic_fetch_or_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_fetch_or:
    case AtomicExpr::AO__atomic_fetch_or:
      LibCallName = "__atomic_fetch_or";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
                        E->getExprLoc());
      break;
    // T __atomic_fetch_sub_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_fetch_sub:
    case AtomicExpr::AO__atomic_fetch_sub:
      LibCallName = "__atomic_fetch_sub";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, LoweredMemTy,
                        E->getExprLoc());
      break;
    // T __atomic_fetch_xor_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_fetch_xor:
    case AtomicExpr::AO__atomic_fetch_xor:
      LibCallName = "__atomic_fetch_xor";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
                        E->getExprLoc());
      break;
    default: return EmitUnsupportedRValue(E, "atomic library call");
    }

    // Optimized functions have the size in their name.
    if (UseOptimizedLibcall)
      LibCallName += "_" + llvm::utostr(Size);
    // By default, assume we return a value of the atomic type.
    if (!HaveRetTy) {
      if (UseOptimizedLibcall) {
        // Value is returned directly.
        RetTy = MemTy;
      } else {
        // Value is returned through parameter before the order.
        RetTy = getContext().VoidTy;
        Args.add(RValue::get(EmitCastToVoidPtr(Dest)),
                 getContext().VoidPtrTy);
      }
    }
    // order is always the last parameter
    Args.add(RValue::get(Order),
             getContext().IntTy);

    const CGFunctionInfo &FuncInfo =
        CGM.getTypes().arrangeFreeFunctionCall(RetTy, Args,
            FunctionType::ExtInfo(), RequiredArgs::All);
    llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FuncInfo);
    llvm::Constant *Func = CGM.CreateRuntimeFunction(FTy, LibCallName);
    RValue Res = EmitCall(FuncInfo, Func, ReturnValueSlot(), Args);
    if (!RetTy->isVoidType())
      return Res;
    if (E->getType()->isVoidType())
      return RValue::get(nullptr);
    return convertTempToRValue(Dest, E->getType(), E->getExprLoc());
  }

  bool IsStore = E->getOp() == AtomicExpr::AO__c11_atomic_store ||
                 E->getOp() == AtomicExpr::AO__atomic_store ||
                 E->getOp() == AtomicExpr::AO__atomic_store_n;
  bool IsLoad = E->getOp() == AtomicExpr::AO__c11_atomic_load ||
                E->getOp() == AtomicExpr::AO__atomic_load ||
                E->getOp() == AtomicExpr::AO__atomic_load_n;

  llvm::Type *IPtrTy =
      llvm::IntegerType::get(getLLVMContext(), Size * 8)->getPointerTo();
  llvm::Value *OrigDest = Dest;
  Ptr = Builder.CreateBitCast(Ptr, IPtrTy);
  if (Val1) Val1 = Builder.CreateBitCast(Val1, IPtrTy);
  if (Val2) Val2 = Builder.CreateBitCast(Val2, IPtrTy);
  if (Dest && !E->isCmpXChg()) Dest = Builder.CreateBitCast(Dest, IPtrTy);

  if (isa<llvm::ConstantInt>(Order)) {
    int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
    switch (ord) {
    case AtomicExpr::AO_ABI_memory_order_relaxed:
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
                   Size, Align, llvm::Monotonic);
      break;
    case AtomicExpr::AO_ABI_memory_order_consume:
    case AtomicExpr::AO_ABI_memory_order_acquire:
      if (IsStore)
        break; // Avoid crashing on code with undefined behavior
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
                   Size, Align, llvm::Acquire);
      break;
    case AtomicExpr::AO_ABI_memory_order_release:
      if (IsLoad)
        break; // Avoid crashing on code with undefined behavior
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
                   Size, Align, llvm::Release);
      break;
    case AtomicExpr::AO_ABI_memory_order_acq_rel:
      if (IsLoad || IsStore)
        break; // Avoid crashing on code with undefined behavior
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
                   Size, Align, llvm::AcquireRelease);
      break;
    case AtomicExpr::AO_ABI_memory_order_seq_cst:
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
                   Size, Align, llvm::SequentiallyConsistent);
      break;
    default: // invalid order
      // We should not ever get here normally, but it's hard to
      // enforce that in general.
      break;
    }
    if (E->getType()->isVoidType())
      return RValue::get(nullptr);
    return convertTempToRValue(OrigDest, E->getType(), E->getExprLoc());
  }

  // Long case, when Order isn't obviously constant.

  // Create all the relevant BB's
  llvm::BasicBlock *MonotonicBB = nullptr, *AcquireBB = nullptr,
                   *ReleaseBB = nullptr, *AcqRelBB = nullptr,
                   *SeqCstBB = nullptr;
  MonotonicBB = createBasicBlock("monotonic", CurFn);
  if (!IsStore)
    AcquireBB = createBasicBlock("acquire", CurFn);
  if (!IsLoad)
    ReleaseBB = createBasicBlock("release", CurFn);
  if (!IsLoad && !IsStore)
    AcqRelBB = createBasicBlock("acqrel", CurFn);
  SeqCstBB = createBasicBlock("seqcst", CurFn);
  llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);

  // Create the switch for the split
  // MonotonicBB is arbitrarily chosen as the default case; in practice, this
  // doesn't matter unless someone is crazy enough to use something that
  // doesn't fold to a constant for the ordering.
  Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
  llvm::SwitchInst *SI = Builder.CreateSwitch(Order, MonotonicBB);

  // Emit all the different atomics
  Builder.SetInsertPoint(MonotonicBB);
  EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
               Size, Align, llvm::Monotonic);
  Builder.CreateBr(ContBB);
  if (!IsStore) {
    Builder.SetInsertPoint(AcquireBB);
    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
                 Size, Align, llvm::Acquire);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32(AtomicExpr::AO_ABI_memory_order_consume),
                AcquireBB);
    SI->addCase(Builder.getInt32(AtomicExpr::AO_ABI_memory_order_acquire),
                AcquireBB);
  }
  if (!IsLoad) {
    Builder.SetInsertPoint(ReleaseBB);
    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
                 Size, Align, llvm::Release);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32(AtomicExpr::AO_ABI_memory_order_release),
                ReleaseBB);
  }
  if (!IsLoad && !IsStore) {
    Builder.SetInsertPoint(AcqRelBB);
    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
                 Size, Align, llvm::AcquireRelease);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32(AtomicExpr::AO_ABI_memory_order_acq_rel),
                AcqRelBB);
  }
  Builder.SetInsertPoint(SeqCstBB);
  EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
               Size, Align, llvm::SequentiallyConsistent);
  Builder.CreateBr(ContBB);
  SI->addCase(Builder.getInt32(AtomicExpr::AO_ABI_memory_order_seq_cst),
              SeqCstBB);

  // Cleanup and return
  Builder.SetInsertPoint(ContBB);
  if (E->getType()->isVoidType())
    return RValue::get(nullptr);
  return convertTempToRValue(OrigDest, E->getType(), E->getExprLoc());
}

llvm::Value *AtomicInfo::emitCastToAtomicIntPointer(llvm::Value *addr) const {
  unsigned addrspace =
      cast<llvm::PointerType>(addr->getType())->getAddressSpace();
  llvm::IntegerType *ty =
      llvm::IntegerType::get(CGF.getLLVMContext(), AtomicSizeInBits);
  return CGF.Builder.CreateBitCast(addr, ty->getPointerTo(addrspace));
}

RValue AtomicInfo::convertTempToRValue(llvm::Value *addr,
                                       AggValueSlot resultSlot,
                                       SourceLocation loc) const {
  if (EvaluationKind == TEK_Aggregate)
    return resultSlot.asRValue();

  // Drill into the padding structure if we have one.
  if (hasPadding())
    addr = CGF.Builder.CreateStructGEP(addr, 0);

  // Otherwise, just convert the temporary to an r-value using the
  // normal conversion routine.
  return CGF.convertTempToRValue(addr, getValueType(), loc);
}

/// Emit a load from an l-value of atomic type.  Note that the r-value
/// we produce is an r-value of the atomic *value* type.
RValue CodeGenFunction::EmitAtomicLoad(LValue src, SourceLocation loc,
                                       AggValueSlot resultSlot) {
  AtomicInfo atomics(*this, src);

  // Check whether we should use a library call.
  if (atomics.shouldUseLibcall()) {
    llvm::Value *tempAddr;
    if (!resultSlot.isIgnored()) {
      assert(atomics.getEvaluationKind() == TEK_Aggregate);
      tempAddr = resultSlot.getAddr();
    } else {
      tempAddr = CreateMemTemp(atomics.getAtomicType(), "atomic-load-temp");
    }

    // void __atomic_load(size_t size, void *mem, void *return, int order);
    CallArgList args;
    args.add(RValue::get(atomics.getAtomicSizeValue()),
             getContext().getSizeType());
    args.add(RValue::get(EmitCastToVoidPtr(src.getAddress())),
             getContext().VoidPtrTy);
    args.add(RValue::get(EmitCastToVoidPtr(tempAddr)),
             getContext().VoidPtrTy);
    args.add(RValue::get(llvm::ConstantInt::get(
                 IntTy, AtomicExpr::AO_ABI_memory_order_seq_cst)),
             getContext().IntTy);
    emitAtomicLibcall(*this, "__atomic_load", getContext().VoidTy, args);

    // Produce the r-value.
    return atomics.convertTempToRValue(tempAddr, resultSlot, loc);
  }

  // Okay, we're doing this natively.
  llvm::Value *addr = atomics.emitCastToAtomicIntPointer(src.getAddress());
  llvm::LoadInst *load = Builder.CreateLoad(addr, "atomic-load");
  load->setAtomic(llvm::SequentiallyConsistent);

  // Other decoration.
  load->setAlignment(src.getAlignment().getQuantity());
  if (src.isVolatileQualified())
    load->setVolatile(true);
  if (src.getTBAAInfo())
    CGM.DecorateInstruction(load, src.getTBAAInfo());

  // Okay, turn that back into the original value type.
  QualType valueType = atomics.getValueType();
  llvm::Value *result = load;

  // If we're ignoring an aggregate return, don't do anything.
  if (atomics.getEvaluationKind() == TEK_Aggregate && resultSlot.isIgnored())
    return RValue::getAggregate(nullptr, false);

  // The easiest way to do this is to go through memory, but we
  // try not to in some easy cases.
  if (atomics.getEvaluationKind() == TEK_Scalar && !atomics.hasPadding()) {
    llvm::Type *resultTy = CGM.getTypes().ConvertTypeForMem(valueType);
    if (isa<llvm::IntegerType>(resultTy)) {
      assert(result->getType() == resultTy);
      result = EmitFromMemory(result, valueType);
    } else if (isa<llvm::PointerType>(resultTy)) {
      result = Builder.CreateIntToPtr(result, resultTy);
    } else {
      result = Builder.CreateBitCast(result, resultTy);
    }
    return RValue::get(result);
  }

  // Create a temporary.  This needs to be big enough to hold the
  // atomic integer.
  llvm::Value *temp;
  bool tempIsVolatile = false;
  CharUnits tempAlignment;
  if (atomics.getEvaluationKind() == TEK_Aggregate) {
    assert(!resultSlot.isIgnored());
    temp = resultSlot.getAddr();
    tempAlignment = atomics.getValueAlignment();
    tempIsVolatile = resultSlot.isVolatile();
  } else {
    temp = CreateMemTemp(atomics.getAtomicType(), "atomic-load-temp");
    tempAlignment = atomics.getAtomicAlignment();
  }

  // Slam the integer into the temporary.
  llvm::Value *castTemp = atomics.emitCastToAtomicIntPointer(temp);
  Builder.CreateAlignedStore(result, castTemp, tempAlignment.getQuantity())
    ->setVolatile(tempIsVolatile);

  return atomics.convertTempToRValue(temp, resultSlot, loc);
}



/// Copy an r-value into memory as part of storing to an atomic type.
/// This needs to create a bit-pattern suitable for atomic operations.
void AtomicInfo::emitCopyIntoMemory(RValue rvalue, LValue dest) const {
  // If we have an r-value, the rvalue should be of the atomic type,
  // which means that the caller is responsible for having zeroed
  // any padding.  Just do an aggregate copy of that type.
  if (rvalue.isAggregate()) {
    CGF.EmitAggregateCopy(dest.getAddress(),
                          rvalue.getAggregateAddr(),
                          getAtomicType(),
                          (rvalue.isVolatileQualified()
                           || dest.isVolatileQualified()),
                          dest.getAlignment());
    return;
  }

  // Okay, otherwise we're copying stuff.

  // Zero out the buffer if necessary.
  emitMemSetZeroIfNecessary(dest);

  // Drill past the padding if present.
  dest = projectValue(dest);

  // Okay, store the rvalue in.
  if (rvalue.isScalar()) {
    CGF.EmitStoreOfScalar(rvalue.getScalarVal(), dest, /*init*/ true);
  } else {
    CGF.EmitStoreOfComplex(rvalue.getComplexVal(), dest, /*init*/ true);
  }
}


/// Materialize an r-value into memory for the purposes of storing it
/// to an atomic type.
llvm::Value *AtomicInfo::materializeRValue(RValue rvalue) const {
  // Aggregate r-values are already in memory, and EmitAtomicStore
  // requires them to be values of the atomic type.
  if (rvalue.isAggregate())
    return rvalue.getAggregateAddr();

  // Otherwise, make a temporary and materialize into it.
  llvm::Value *temp = CGF.CreateMemTemp(getAtomicType(), "atomic-store-temp");
  LValue tempLV = CGF.MakeAddrLValue(temp, getAtomicType(), getAtomicAlignment());
  emitCopyIntoMemory(rvalue, tempLV);
  return temp;
}

/// Emit a store to an l-value of atomic type.
///
/// Note that the r-value is expected to be an r-value *of the atomic
/// type*; this means that for aggregate r-values, it should include
/// storage for any padding that was necessary.
void CodeGenFunction::EmitAtomicStore(RValue rvalue, LValue dest, bool isInit) {
  // If this is an aggregate r-value, it should agree in type except
  // maybe for address-space qualification.
  assert(!rvalue.isAggregate() ||
         rvalue.getAggregateAddr()->getType()->getPointerElementType()
           == dest.getAddress()->getType()->getPointerElementType());

  AtomicInfo atomics(*this, dest);

  // If this is an initialization, just put the value there normally.
  if (isInit) {
    atomics.emitCopyIntoMemory(rvalue, dest);
    return;
  }

  // Check whether we should use a library call.
  if (atomics.shouldUseLibcall()) {
    // Produce a source address.
    llvm::Value *srcAddr = atomics.materializeRValue(rvalue);

    // void __atomic_store(size_t size, void *mem, void *val, int order)
    CallArgList args;
    args.add(RValue::get(atomics.getAtomicSizeValue()),
             getContext().getSizeType());
    args.add(RValue::get(EmitCastToVoidPtr(dest.getAddress())),
             getContext().VoidPtrTy);
    args.add(RValue::get(EmitCastToVoidPtr(srcAddr)),
             getContext().VoidPtrTy);
    args.add(RValue::get(llvm::ConstantInt::get(
                 IntTy, AtomicExpr::AO_ABI_memory_order_seq_cst)),
             getContext().IntTy);
    emitAtomicLibcall(*this, "__atomic_store", getContext().VoidTy, args);
    return;
  }

  // Okay, we're doing this natively.
  llvm::Value *intValue;

  // If we've got a scalar value of the right size, try to avoid going
  // through memory.
  if (rvalue.isScalar() && !atomics.hasPadding()) {
    llvm::Value *value = rvalue.getScalarVal();
    if (isa<llvm::IntegerType>(value->getType())) {
      intValue = value;
    } else {
      llvm::IntegerType *inputIntTy =
        llvm::IntegerType::get(getLLVMContext(), atomics.getValueSizeInBits());
      if (isa<llvm::PointerType>(value->getType())) {
        intValue = Builder.CreatePtrToInt(value, inputIntTy);
      } else {
        intValue = Builder.CreateBitCast(value, inputIntTy);
      }
    }

  // Otherwise, we need to go through memory.
  } else {
    // Put the r-value in memory.
    llvm::Value *addr = atomics.materializeRValue(rvalue);

    // Cast the temporary to the atomic int type and pull a value out.
    addr = atomics.emitCastToAtomicIntPointer(addr);
    intValue = Builder.CreateAlignedLoad(addr,
                                 atomics.getAtomicAlignment().getQuantity());
  }

  // Do the atomic store.
  llvm::Value *addr = atomics.emitCastToAtomicIntPointer(dest.getAddress());
  llvm::StoreInst *store = Builder.CreateStore(intValue, addr);

  // Initializations don't need to be atomic.
  if (!isInit) store->setAtomic(llvm::SequentiallyConsistent);

  // Other decoration.
  store->setAlignment(dest.getAlignment().getQuantity());
  if (dest.isVolatileQualified())
    store->setVolatile(true);
  if (dest.getTBAAInfo())
    CGM.DecorateInstruction(store, dest.getTBAAInfo());
}

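/// Emit the initialization of an object of atomic type from 'init'. This is
/// not itself an atomic operation; it just stores a value with the layout
/// that later atomic operations expect.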
void CodeGenFunction::EmitAtomicInit(Expr *init, LValue dest) {
  AtomicInfo atomics(*this, dest);

  switch (atomics.getEvaluationKind()) {
  case TEK_Scalar: {
    llvm::Value *value = EmitScalarExpr(init);
    atomics.emitCopyIntoMemory(RValue::get(value), dest);
    return;
  }

  case TEK_Complex: {
    ComplexPairTy value = EmitComplexExpr(init);
    atomics.emitCopyIntoMemory(RValue::getComplex(value), dest);
    return;
  }

  case TEK_Aggregate: {
    // Fix up the destination if the initializer isn't an expression
    // of atomic type.
    bool Zeroed = false;
    if (!init->getType()->isAtomicType()) {
      Zeroed = atomics.emitMemSetZeroIfNecessary(dest);
      dest = atomics.projectValue(dest);
    }

    // Evaluate the expression directly into the destination.
    AggValueSlot slot = AggValueSlot::forLValue(dest,
                                        AggValueSlot::IsNotDestructed,
                                        AggValueSlot::DoesNotNeedGCBarriers,
                                        AggValueSlot::IsNotAliased,
                                        Zeroed ? AggValueSlot::IsZeroed :
                                                 AggValueSlot::IsNotZeroed);

    EmitAggExpr(init, slot);
    return;
  }
  }
  llvm_unreachable("bad evaluation kind");
}