//===--- CGAtomic.cpp - Emit LLVM IR for atomic operations ----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the code for emitting atomic operations.
//
//===----------------------------------------------------------------------===//

#include "CodeGenFunction.h"
#include "CGCall.h"
#include "CodeGenModule.h"
#include "clang/AST/ASTContext.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Operator.h"

using namespace clang;
using namespace CodeGen;

namespace {
  class AtomicInfo {
    CodeGenFunction &CGF;
    QualType AtomicTy;
    QualType ValueTy;
    uint64_t AtomicSizeInBits;
    uint64_t ValueSizeInBits;
    CharUnits AtomicAlign;
    CharUnits ValueAlign;
    CharUnits LValueAlign;
    TypeEvaluationKind EvaluationKind;
    bool UseLibcall;
  public:
    AtomicInfo(CodeGenFunction &CGF, LValue &lvalue) : CGF(CGF) {
      assert(lvalue.isSimple());

      AtomicTy = lvalue.getType();
      ValueTy = AtomicTy->castAs<AtomicType>()->getValueType();
      EvaluationKind = CGF.getEvaluationKind(ValueTy);

      ASTContext &C = CGF.getContext();

      uint64_t ValueAlignInBits;
      uint64_t AtomicAlignInBits;
      TypeInfo ValueTI = C.getTypeInfo(ValueTy);
      ValueSizeInBits = ValueTI.Width;
      ValueAlignInBits = ValueTI.Align;

      TypeInfo AtomicTI = C.getTypeInfo(AtomicTy);
      AtomicSizeInBits = AtomicTI.Width;
      AtomicAlignInBits = AtomicTI.Align;

      assert(ValueSizeInBits <= AtomicSizeInBits);
      assert(ValueAlignInBits <= AtomicAlignInBits);

      AtomicAlign = C.toCharUnitsFromBits(AtomicAlignInBits);
      ValueAlign = C.toCharUnitsFromBits(ValueAlignInBits);
      if (lvalue.getAlignment().isZero())
        lvalue.setAlignment(AtomicAlign);

      UseLibcall =
          (AtomicSizeInBits > uint64_t(C.toBits(lvalue.getAlignment())) ||
           AtomicSizeInBits > C.getTargetInfo().getMaxAtomicInlineWidth());
    }
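
    // Illustrative sketch (assuming a typical x86-64 target where
    // getMaxAtomicInlineWidth() returns 128): _Atomic(int) is 32 bits
    // and naturally aligned, so it is lowered to inline atomic
    // instructions, whereas a 32-byte _Atomic struct exceeds the inline
    // width and falls back to the __atomic_* libcalls:
    //
    //   _Atomic(int) i;            // UseLibcall == false
    //   struct S { char c[32]; };
    //   _Atomic(struct S) s;       // UseLibcall == true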

    QualType getAtomicType() const { return AtomicTy; }
    QualType getValueType() const { return ValueTy; }
    CharUnits getAtomicAlignment() const { return AtomicAlign; }
    CharUnits getValueAlignment() const { return ValueAlign; }
    uint64_t getAtomicSizeInBits() const { return AtomicSizeInBits; }
    uint64_t getValueSizeInBits() const { return ValueSizeInBits; }
    TypeEvaluationKind getEvaluationKind() const { return EvaluationKind; }
    bool shouldUseLibcall() const { return UseLibcall; }

    /// Is the atomic size larger than the underlying value type?
    ///
    /// Note that the absence of padding does not mean that atomic
    /// objects are completely interchangeable with non-atomic
    /// objects: we might have promoted the alignment of a type
    /// without making it bigger.
    bool hasPadding() const {
      return (ValueSizeInBits != AtomicSizeInBits);
    }

    bool emitMemSetZeroIfNecessary(LValue dest) const;

    llvm::Value *getAtomicSizeValue() const {
      CharUnits size = CGF.getContext().toCharUnitsFromBits(AtomicSizeInBits);
      return CGF.CGM.getSize(size);
    }

    /// Cast the given pointer to an integer pointer suitable for
    /// atomic operations.
    llvm::Value *emitCastToAtomicIntPointer(llvm::Value *addr) const;

    /// Turn an atomic-layout object into an r-value.
    RValue convertTempToRValue(llvm::Value *addr,
                               AggValueSlot resultSlot,
                               SourceLocation loc) const;

    /// Copy an atomic r-value into atomic-layout memory.
    void emitCopyIntoMemory(RValue rvalue, LValue lvalue) const;

    /// Project an l-value down to the value field.
    LValue projectValue(LValue lvalue) const {
      llvm::Value *addr = lvalue.getAddress();
      if (hasPadding())
        addr = CGF.Builder.CreateStructGEP(addr, 0);

      return LValue::MakeAddr(addr, getValueType(), lvalue.getAlignment(),
                              CGF.getContext(), lvalue.getTBAAInfo());
    }

    /// Materialize an atomic r-value in atomic-layout memory.
    llvm::Value *materializeRValue(RValue rvalue) const;

  private:
    bool requiresMemSetZero(llvm::Type *type) const;
  };
}

static RValue emitAtomicLibcall(CodeGenFunction &CGF,
                                StringRef fnName,
                                QualType resultType,
                                CallArgList &args) {
  const CGFunctionInfo &fnInfo =
      CGF.CGM.getTypes().arrangeFreeFunctionCall(resultType, args,
          FunctionType::ExtInfo(), RequiredArgs::All);
  llvm::FunctionType *fnTy = CGF.CGM.getTypes().GetFunctionType(fnInfo);
  llvm::Constant *fn = CGF.CGM.CreateRuntimeFunction(fnTy, fnName);
  return CGF.EmitCall(fnInfo, fn, ReturnValueSlot(), args);
}

/// Does a store of the given IR type modify the full expected width?
static bool isFullSizeType(CodeGenModule &CGM, llvm::Type *type,
                           uint64_t expectedSize) {
  return (CGM.getDataLayout().getTypeStoreSize(type) * 8 == expectedSize);
}
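
// Illustrative sketch (not from the original source): on x86, an
// _Atomic(long double) occupies 96 or 128 bits, but a store of the
// underlying x86_fp80 IR type only writes 80 bits, so isFullSizeType()
// returns false and the padding must be zeroed separately:
//
//   isFullSizeType(CGM, llvm::Type::getX86_FP80Ty(Ctx), 128)  // false
//   isFullSizeType(CGM, llvm::Type::getInt32Ty(Ctx), 32)      // true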

/// Does the atomic type require memsetting to zero before initialization?
///
/// The IR type is provided as a way of making certain queries faster.
bool AtomicInfo::requiresMemSetZero(llvm::Type *type) const {
  // If the atomic type has size padding, we definitely need a memset.
  if (hasPadding()) return true;

  // Otherwise, do some simple heuristics to try to avoid it:
  switch (getEvaluationKind()) {
  // For scalars and complexes, check whether the store size of the
  // type uses the full size.
  case TEK_Scalar:
    return !isFullSizeType(CGF.CGM, type, AtomicSizeInBits);
  case TEK_Complex:
    return !isFullSizeType(CGF.CGM, type->getStructElementType(0),
                           AtomicSizeInBits / 2);

  // Padding in structs has an undefined bit pattern.  User beware.
  case TEK_Aggregate:
    return false;
  }
  llvm_unreachable("bad evaluation kind");
}
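
// For example (a hedged sketch): _Atomic(_Complex float) is evaluated as
// TEK_Complex; its IR type is { float, float }, and each 32-bit half
// fills AtomicSizeInBits / 2 == 32 bits, so no memset is required.
// _Atomic(_Complex long double) on x86 would fail the half-width check
// and be zero-initialized first.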

bool AtomicInfo::emitMemSetZeroIfNecessary(LValue dest) const {
  llvm::Value *addr = dest.getAddress();
  if (!requiresMemSetZero(addr->getType()->getPointerElementType()))
    return false;

  CGF.Builder.CreateMemSet(addr, llvm::ConstantInt::get(CGF.Int8Ty, 0),
                           AtomicSizeInBits / 8,
                           dest.getAlignment().getQuantity());
  return true;
}

static void emitAtomicCmpXchg(CodeGenFunction &CGF, AtomicExpr *E, bool IsWeak,
                              llvm::Value *Dest, llvm::Value *Ptr,
                              llvm::Value *Val1, llvm::Value *Val2,
                              uint64_t Size, unsigned Align,
                              llvm::AtomicOrdering SuccessOrder,
                              llvm::AtomicOrdering FailureOrder) {
  llvm::LoadInst *Expected = CGF.Builder.CreateLoad(Val1);
  Expected->setAlignment(Align);
  llvm::LoadInst *Desired = CGF.Builder.CreateLoad(Val2);
  Desired->setAlignment(Align);

  llvm::AtomicCmpXchgInst *Pair = CGF.Builder.CreateAtomicCmpXchg(
      Ptr, Expected, Desired, SuccessOrder, FailureOrder);
  Pair->setVolatile(E->isVolatile());
  Pair->setWeak(IsWeak);

  // Cmp holds the result of the compare-exchange operation: true on success,
  // false on failure.
  llvm::Value *Old = CGF.Builder.CreateExtractValue(Pair, 0);
  llvm::Value *Cmp = CGF.Builder.CreateExtractValue(Pair, 1);

  // This basic block is used to hold the store instruction if the operation
  // failed.
  llvm::BasicBlock *StoreExpectedBB =
      CGF.createBasicBlock("cmpxchg.store_expected", CGF.CurFn);

  // This basic block is the exit point of the operation; we should end up
  // here regardless of whether or not the operation succeeded.
  llvm::BasicBlock *ContinueBB =
      CGF.createBasicBlock("cmpxchg.continue", CGF.CurFn);

  // Update Expected if Expected isn't equal to Old, otherwise branch to the
  // exit point.
  CGF.Builder.CreateCondBr(Cmp, ContinueBB, StoreExpectedBB);

  CGF.Builder.SetInsertPoint(StoreExpectedBB);
  // Update the memory at Expected with Old's value.
  llvm::StoreInst *StoreExpected = CGF.Builder.CreateStore(Old, Val1);
  StoreExpected->setAlignment(Align);
  // Finally, branch to the exit point.
  CGF.Builder.CreateBr(ContinueBB);

  CGF.Builder.SetInsertPoint(ContinueBB);
  // Update the memory at Dest with Cmp's value.
  CGF.EmitStoreOfScalar(Cmp, CGF.MakeAddrLValue(Dest, E->getType()));
}
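
// Roughly the IR this produces for a strong, seq_cst exchange of an i32
// (a hedged sketch; exact value names and orderings vary):
//
//   %pair = cmpxchg i32* %ptr, i32 %expected, i32 %desired seq_cst seq_cst
//   %old  = extractvalue { i32, i1 } %pair, 0
//   %cmp  = extractvalue { i32, i1 } %pair, 1
//   br i1 %cmp, label %cmpxchg.continue, label %cmpxchg.store_expected
//
// cmpxchg.store_expected:              ; write %old back to *expected
//   store i32 %old, i32* %val1
//   br label %cmpxchg.continue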

/// Given an ordering required on success, emit all possible cmpxchg
/// instructions to cope with the provided (but possibly only dynamically
/// known) FailureOrder.
static void emitAtomicCmpXchgFailureSet(CodeGenFunction &CGF, AtomicExpr *E,
                                        bool IsWeak, llvm::Value *Dest,
                                        llvm::Value *Ptr, llvm::Value *Val1,
                                        llvm::Value *Val2,
                                        llvm::Value *FailureOrderVal,
                                        uint64_t Size, unsigned Align,
                                        llvm::AtomicOrdering SuccessOrder) {
  llvm::AtomicOrdering FailureOrder;
  if (llvm::ConstantInt *FO = dyn_cast<llvm::ConstantInt>(FailureOrderVal)) {
    switch (FO->getSExtValue()) {
    default:
      FailureOrder = llvm::Monotonic;
      break;
    case AtomicExpr::AO_ABI_memory_order_consume:
    case AtomicExpr::AO_ABI_memory_order_acquire:
      FailureOrder = llvm::Acquire;
      break;
    case AtomicExpr::AO_ABI_memory_order_seq_cst:
      FailureOrder = llvm::SequentiallyConsistent;
      break;
    }
    if (FailureOrder >= SuccessOrder) {
      // Don't assert on undefined behaviour.
      FailureOrder =
          llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(SuccessOrder);
    }
    emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, Size, Align,
                      SuccessOrder, FailureOrder);
    return;
  }

  // Create all the relevant BBs.
  llvm::BasicBlock *MonotonicBB = nullptr, *AcquireBB = nullptr,
                   *SeqCstBB = nullptr;
  MonotonicBB = CGF.createBasicBlock("monotonic_fail", CGF.CurFn);
  if (SuccessOrder != llvm::Monotonic && SuccessOrder != llvm::Release)
    AcquireBB = CGF.createBasicBlock("acquire_fail", CGF.CurFn);
  if (SuccessOrder == llvm::SequentiallyConsistent)
    SeqCstBB = CGF.createBasicBlock("seqcst_fail", CGF.CurFn);

  llvm::BasicBlock *ContBB = CGF.createBasicBlock("atomic.continue", CGF.CurFn);

  llvm::SwitchInst *SI = CGF.Builder.CreateSwitch(FailureOrderVal, MonotonicBB);

  // Emit all the different atomics.

  // MonotonicBB is arbitrarily chosen as the default case; in practice, this
  // doesn't matter unless someone is crazy enough to use something that
  // doesn't fold to a constant for the ordering.
  CGF.Builder.SetInsertPoint(MonotonicBB);
  emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2,
                    Size, Align, SuccessOrder, llvm::Monotonic);
  CGF.Builder.CreateBr(ContBB);

  if (AcquireBB) {
    CGF.Builder.SetInsertPoint(AcquireBB);
    emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2,
                      Size, Align, SuccessOrder, llvm::Acquire);
    CGF.Builder.CreateBr(ContBB);
    SI->addCase(CGF.Builder.getInt32(AtomicExpr::AO_ABI_memory_order_consume),
                AcquireBB);
    SI->addCase(CGF.Builder.getInt32(AtomicExpr::AO_ABI_memory_order_acquire),
                AcquireBB);
  }
  if (SeqCstBB) {
    CGF.Builder.SetInsertPoint(SeqCstBB);
    emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2,
                      Size, Align, SuccessOrder, llvm::SequentiallyConsistent);
    CGF.Builder.CreateBr(ContBB);
    SI->addCase(CGF.Builder.getInt32(AtomicExpr::AO_ABI_memory_order_seq_cst),
                SeqCstBB);
  }

  CGF.Builder.SetInsertPoint(ContBB);
}
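
// A hedged usage sketch: for C source such as
//
//   int expected = ...;
//   atomic_compare_exchange_strong_explicit(&a, &expected, 1,
//                                           memory_order_seq_cst, failure);
//
// where `failure` is not a compile-time constant, this emits a switch on
// the failure ordering with monotonic_fail / acquire_fail / seqcst_fail
// blocks, each containing one cmpxchg with a different failure ordering.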

static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, llvm::Value *Dest,
                         llvm::Value *Ptr, llvm::Value *Val1, llvm::Value *Val2,
                         llvm::Value *IsWeak, llvm::Value *FailureOrder,
                         uint64_t Size, unsigned Align,
                         llvm::AtomicOrdering Order) {
  llvm::AtomicRMWInst::BinOp Op = llvm::AtomicRMWInst::Add;
  llvm::Instruction::BinaryOps PostOp = (llvm::Instruction::BinaryOps)0;

  switch (E->getOp()) {
  case AtomicExpr::AO__c11_atomic_init:
    llvm_unreachable("Already handled!");

  case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
    emitAtomicCmpXchgFailureSet(CGF, E, false, Dest, Ptr, Val1, Val2,
                                FailureOrder, Size, Align, Order);
    return;
  case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
    emitAtomicCmpXchgFailureSet(CGF, E, true, Dest, Ptr, Val1, Val2,
                                FailureOrder, Size, Align, Order);
    return;
  case AtomicExpr::AO__atomic_compare_exchange:
  case AtomicExpr::AO__atomic_compare_exchange_n: {
    if (llvm::ConstantInt *IsWeakC = dyn_cast<llvm::ConstantInt>(IsWeak)) {
      emitAtomicCmpXchgFailureSet(CGF, E, IsWeakC->getZExtValue(), Dest, Ptr,
                                  Val1, Val2, FailureOrder, Size, Align, Order);
    } else {
      // Create all the relevant BBs.
      llvm::BasicBlock *StrongBB =
          CGF.createBasicBlock("cmpxchg.strong", CGF.CurFn);
      llvm::BasicBlock *WeakBB =
          CGF.createBasicBlock("cmpxchg.weak", CGF.CurFn);
      llvm::BasicBlock *ContBB =
          CGF.createBasicBlock("cmpxchg.continue", CGF.CurFn);

      llvm::SwitchInst *SI = CGF.Builder.CreateSwitch(IsWeak, WeakBB);
      SI->addCase(CGF.Builder.getInt1(false), StrongBB);

      CGF.Builder.SetInsertPoint(StrongBB);
      emitAtomicCmpXchgFailureSet(CGF, E, false, Dest, Ptr, Val1, Val2,
                                  FailureOrder, Size, Align, Order);
      CGF.Builder.CreateBr(ContBB);

      CGF.Builder.SetInsertPoint(WeakBB);
      emitAtomicCmpXchgFailureSet(CGF, E, true, Dest, Ptr, Val1, Val2,
                                  FailureOrder, Size, Align, Order);
      CGF.Builder.CreateBr(ContBB);

      CGF.Builder.SetInsertPoint(ContBB);
    }
    return;
  }
  case AtomicExpr::AO__c11_atomic_load:
  case AtomicExpr::AO__atomic_load_n:
  case AtomicExpr::AO__atomic_load: {
    llvm::LoadInst *Load = CGF.Builder.CreateLoad(Ptr);
    Load->setAtomic(Order);
    Load->setAlignment(Size);
    Load->setVolatile(E->isVolatile());
    llvm::StoreInst *StoreDest = CGF.Builder.CreateStore(Load, Dest);
    StoreDest->setAlignment(Align);
    return;
  }

  case AtomicExpr::AO__c11_atomic_store:
  case AtomicExpr::AO__atomic_store:
  case AtomicExpr::AO__atomic_store_n: {
    assert(!Dest && "Store does not return a value");
    llvm::LoadInst *LoadVal1 = CGF.Builder.CreateLoad(Val1);
    LoadVal1->setAlignment(Align);
    llvm::StoreInst *Store = CGF.Builder.CreateStore(LoadVal1, Ptr);
    Store->setAtomic(Order);
    Store->setAlignment(Size);
    Store->setVolatile(E->isVolatile());
    return;
  }

  case AtomicExpr::AO__c11_atomic_exchange:
  case AtomicExpr::AO__atomic_exchange_n:
  case AtomicExpr::AO__atomic_exchange:
    Op = llvm::AtomicRMWInst::Xchg;
    break;

  case AtomicExpr::AO__atomic_add_fetch:
    PostOp = llvm::Instruction::Add;
    // Fall through.
  case AtomicExpr::AO__c11_atomic_fetch_add:
  case AtomicExpr::AO__atomic_fetch_add:
    Op = llvm::AtomicRMWInst::Add;
    break;

  case AtomicExpr::AO__atomic_sub_fetch:
    PostOp = llvm::Instruction::Sub;
    // Fall through.
  case AtomicExpr::AO__c11_atomic_fetch_sub:
  case AtomicExpr::AO__atomic_fetch_sub:
    Op = llvm::AtomicRMWInst::Sub;
    break;

  case AtomicExpr::AO__atomic_and_fetch:
    PostOp = llvm::Instruction::And;
    // Fall through.
  case AtomicExpr::AO__c11_atomic_fetch_and:
  case AtomicExpr::AO__atomic_fetch_and:
    Op = llvm::AtomicRMWInst::And;
    break;

  case AtomicExpr::AO__atomic_or_fetch:
    PostOp = llvm::Instruction::Or;
    // Fall through.
  case AtomicExpr::AO__c11_atomic_fetch_or:
  case AtomicExpr::AO__atomic_fetch_or:
    Op = llvm::AtomicRMWInst::Or;
    break;

  case AtomicExpr::AO__atomic_xor_fetch:
    PostOp = llvm::Instruction::Xor;
    // Fall through.
  case AtomicExpr::AO__c11_atomic_fetch_xor:
  case AtomicExpr::AO__atomic_fetch_xor:
    Op = llvm::AtomicRMWInst::Xor;
    break;

  case AtomicExpr::AO__atomic_nand_fetch:
    PostOp = llvm::Instruction::And;
    // Fall through.
  case AtomicExpr::AO__atomic_fetch_nand:
    Op = llvm::AtomicRMWInst::Nand;
    break;
  }

  llvm::LoadInst *LoadVal1 = CGF.Builder.CreateLoad(Val1);
  LoadVal1->setAlignment(Align);
  llvm::AtomicRMWInst *RMWI =
      CGF.Builder.CreateAtomicRMW(Op, Ptr, LoadVal1, Order);
  RMWI->setVolatile(E->isVolatile());

  // For __atomic_*_fetch operations, perform the operation again to
  // determine the value which was written.
  llvm::Value *Result = RMWI;
  if (PostOp)
    Result = CGF.Builder.CreateBinOp(PostOp, RMWI, LoadVal1);
  if (E->getOp() == AtomicExpr::AO__atomic_nand_fetch)
    Result = CGF.Builder.CreateNot(Result);
  llvm::StoreInst *StoreDest = CGF.Builder.CreateStore(Result, Dest);
  StoreDest->setAlignment(Align);
}
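
// Hedged example of the post-op path: __atomic_add_fetch(p, 5, order)
// must return the *new* value, but atomicrmw yields the *old* one, so
// the addition is replayed on the result (IR names illustrative only):
//
//   %old = atomicrmw add i32* %p, i32 5 seq_cst
//   %new = add i32 %old, 5          ; PostOp replay
//
// __atomic_nand_fetch additionally negates, since nand is ~(old & val).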

// This function emits any expression (scalar, complex, or aggregate)
// into a temporary alloca.
static llvm::Value *
EmitValToTemp(CodeGenFunction &CGF, Expr *E) {
  llvm::Value *DeclPtr = CGF.CreateMemTemp(E->getType(), ".atomictmp");
  CGF.EmitAnyExprToMem(E, DeclPtr, E->getType().getQualifiers(),
                       /*Init*/ true);
  return DeclPtr;
}

static void
AddDirectArgument(CodeGenFunction &CGF, CallArgList &Args,
                  bool UseOptimizedLibcall, llvm::Value *Val, QualType ValTy,
                  SourceLocation Loc, CharUnits SizeInChars) {
  if (UseOptimizedLibcall) {
    // Load value and pass it to the function directly.
    unsigned Align = CGF.getContext().getTypeAlignInChars(ValTy).getQuantity();
    int64_t SizeInBits = CGF.getContext().toBits(SizeInChars);
    ValTy =
        CGF.getContext().getIntTypeForBitwidth(SizeInBits, /*Signed=*/false);
    llvm::Type *IPtrTy = llvm::IntegerType::get(CGF.getLLVMContext(),
                                                SizeInBits)->getPointerTo();
    Val = CGF.EmitLoadOfScalar(CGF.Builder.CreateBitCast(Val, IPtrTy), false,
                               Align, CGF.getContext().getPointerType(ValTy),
                               Loc);
    // Coerce the value into an appropriately sized integer type.
    Args.add(RValue::get(Val), ValTy);
  } else {
    // Non-optimized functions always take a reference.
    Args.add(RValue::get(CGF.EmitCastToVoidPtr(Val)),
             CGF.getContext().VoidPtrTy);
  }
}

RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) {
  QualType AtomicTy = E->getPtr()->getType()->getPointeeType();
  QualType MemTy = AtomicTy;
  if (const AtomicType *AT = AtomicTy->getAs<AtomicType>())
    MemTy = AT->getValueType();
  CharUnits sizeChars = getContext().getTypeSizeInChars(AtomicTy);
  uint64_t Size = sizeChars.getQuantity();
  CharUnits alignChars = getContext().getTypeAlignInChars(AtomicTy);
  unsigned Align = alignChars.getQuantity();
  unsigned MaxInlineWidthInBits =
      getTarget().getMaxAtomicInlineWidth();
  bool UseLibcall = (Size != Align ||
                     getContext().toBits(sizeChars) > MaxInlineWidthInBits);

  llvm::Value *IsWeak = nullptr, *OrderFail = nullptr, *Val1 = nullptr,
              *Val2 = nullptr;
  llvm::Value *Ptr = EmitScalarExpr(E->getPtr());

  if (E->getOp() == AtomicExpr::AO__c11_atomic_init) {
    assert(!Dest && "Init does not return a value");
    LValue lvalue = LValue::MakeAddr(Ptr, AtomicTy, alignChars, getContext());
    EmitAtomicInit(E->getVal1(), lvalue);
    return RValue::get(nullptr);
  }

  llvm::Value *Order = EmitScalarExpr(E->getOrder());

  switch (E->getOp()) {
  case AtomicExpr::AO__c11_atomic_init:
    llvm_unreachable("Already handled!");

  case AtomicExpr::AO__c11_atomic_load:
  case AtomicExpr::AO__atomic_load_n:
    break;

  case AtomicExpr::AO__atomic_load:
    Dest = EmitScalarExpr(E->getVal1());
    break;

  case AtomicExpr::AO__atomic_store:
    Val1 = EmitScalarExpr(E->getVal1());
    break;

  case AtomicExpr::AO__atomic_exchange:
    Val1 = EmitScalarExpr(E->getVal1());
    Dest = EmitScalarExpr(E->getVal2());
    break;

  case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
  case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
  case AtomicExpr::AO__atomic_compare_exchange_n:
  case AtomicExpr::AO__atomic_compare_exchange:
    Val1 = EmitScalarExpr(E->getVal1());
    if (E->getOp() == AtomicExpr::AO__atomic_compare_exchange)
      Val2 = EmitScalarExpr(E->getVal2());
    else
      Val2 = EmitValToTemp(*this, E->getVal2());
    OrderFail = EmitScalarExpr(E->getOrderFail());
    if (E->getNumSubExprs() == 6)
      IsWeak = EmitScalarExpr(E->getWeak());
    break;

  case AtomicExpr::AO__c11_atomic_fetch_add:
  case AtomicExpr::AO__c11_atomic_fetch_sub:
    if (MemTy->isPointerType()) {
      // For pointer arithmetic, we're required to do a bit of math:
      // adding 1 to an int* is not the same as adding 1 to a uintptr_t.
      // ... but only for the C11 builtins. The GNU builtins expect the
      // user to multiply by sizeof(T).
      QualType Val1Ty = E->getVal1()->getType();
      llvm::Value *Val1Scalar = EmitScalarExpr(E->getVal1());
      CharUnits PointeeIncAmt =
          getContext().getTypeSizeInChars(MemTy->getPointeeType());
      Val1Scalar = Builder.CreateMul(Val1Scalar, CGM.getSize(PointeeIncAmt));
      Val1 = CreateMemTemp(Val1Ty, ".atomictmp");
      EmitStoreOfScalar(Val1Scalar, MakeAddrLValue(Val1, Val1Ty));
      break;
    }
    // Fall through.
  case AtomicExpr::AO__atomic_fetch_add:
  case AtomicExpr::AO__atomic_fetch_sub:
  case AtomicExpr::AO__atomic_add_fetch:
  case AtomicExpr::AO__atomic_sub_fetch:
  case AtomicExpr::AO__c11_atomic_store:
  case AtomicExpr::AO__c11_atomic_exchange:
  case AtomicExpr::AO__atomic_store_n:
  case AtomicExpr::AO__atomic_exchange_n:
  case AtomicExpr::AO__c11_atomic_fetch_and:
  case AtomicExpr::AO__c11_atomic_fetch_or:
  case AtomicExpr::AO__c11_atomic_fetch_xor:
  case AtomicExpr::AO__atomic_fetch_and:
  case AtomicExpr::AO__atomic_fetch_or:
  case AtomicExpr::AO__atomic_fetch_xor:
  case AtomicExpr::AO__atomic_fetch_nand:
  case AtomicExpr::AO__atomic_and_fetch:
  case AtomicExpr::AO__atomic_or_fetch:
  case AtomicExpr::AO__atomic_xor_fetch:
  case AtomicExpr::AO__atomic_nand_fetch:
    Val1 = EmitValToTemp(*this, E->getVal1());
    break;
  }

  if (!E->getType()->isVoidType() && !Dest)
    Dest = CreateMemTemp(E->getType(), ".atomicdst");

  // Use a library call.  See: http://gcc.gnu.org/wiki/Atomic/GCCMM/LIbrary .
  if (UseLibcall) {
    bool UseOptimizedLibcall = false;
    switch (E->getOp()) {
    case AtomicExpr::AO__c11_atomic_fetch_add:
    case AtomicExpr::AO__atomic_fetch_add:
    case AtomicExpr::AO__c11_atomic_fetch_and:
    case AtomicExpr::AO__atomic_fetch_and:
    case AtomicExpr::AO__c11_atomic_fetch_or:
    case AtomicExpr::AO__atomic_fetch_or:
    case AtomicExpr::AO__c11_atomic_fetch_sub:
    case AtomicExpr::AO__atomic_fetch_sub:
    case AtomicExpr::AO__c11_atomic_fetch_xor:
    case AtomicExpr::AO__atomic_fetch_xor:
      // For these, only library calls for certain sizes exist.
      UseOptimizedLibcall = true;
      break;
    default:
      // Only use optimized library calls for sizes for which they exist.
      if (Size == 1 || Size == 2 || Size == 4 || Size == 8)
        UseOptimizedLibcall = true;
      break;
    }

    CallArgList Args;
    if (!UseOptimizedLibcall) {
      // For non-optimized library calls, the size is the first parameter.
      Args.add(RValue::get(llvm::ConstantInt::get(SizeTy, Size)),
               getContext().getSizeType());
    }
    // The atomic address is the first or second parameter.
    Args.add(RValue::get(EmitCastToVoidPtr(Ptr)), getContext().VoidPtrTy);

    std::string LibCallName;
    QualType LoweredMemTy =
        MemTy->isPointerType() ? getContext().getIntPtrType() : MemTy;
    QualType RetTy;
    bool HaveRetTy = false;
    switch (E->getOp()) {
    // There is only one libcall for compare and exchange, because there is
    // no optimisation benefit possible from a libcall version of a weak
    // compare and exchange.
    // bool __atomic_compare_exchange(size_t size, void *mem, void *expected,
    //                                void *desired, int success, int failure)
    // bool __atomic_compare_exchange_N(T *mem, T *expected, T desired,
    //                                  int success, int failure)
    case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
    case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
    case AtomicExpr::AO__atomic_compare_exchange:
    case AtomicExpr::AO__atomic_compare_exchange_n:
      LibCallName = "__atomic_compare_exchange";
      RetTy = getContext().BoolTy;
      HaveRetTy = true;
      Args.add(RValue::get(EmitCastToVoidPtr(Val1)), getContext().VoidPtrTy);
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val2, MemTy,
                        E->getExprLoc(), sizeChars);
      Args.add(RValue::get(Order), getContext().IntTy);
      Order = OrderFail;
      break;
    // void __atomic_exchange(size_t size, void *mem, void *val, void *return,
    //                        int order)
    // T __atomic_exchange_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_exchange:
    case AtomicExpr::AO__atomic_exchange_n:
    case AtomicExpr::AO__atomic_exchange:
      LibCallName = "__atomic_exchange";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
                        E->getExprLoc(), sizeChars);
      break;
    // void __atomic_store(size_t size, void *mem, void *val, int order)
    // void __atomic_store_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_store:
    case AtomicExpr::AO__atomic_store:
    case AtomicExpr::AO__atomic_store_n:
      LibCallName = "__atomic_store";
      RetTy = getContext().VoidTy;
      HaveRetTy = true;
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
                        E->getExprLoc(), sizeChars);
      break;
    // void __atomic_load(size_t size, void *mem, void *return, int order)
    // T __atomic_load_N(T *mem, int order)
    case AtomicExpr::AO__c11_atomic_load:
    case AtomicExpr::AO__atomic_load:
    case AtomicExpr::AO__atomic_load_n:
      LibCallName = "__atomic_load";
      break;
    // T __atomic_fetch_add_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_fetch_add:
    case AtomicExpr::AO__atomic_fetch_add:
      LibCallName = "__atomic_fetch_add";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, LoweredMemTy,
                        E->getExprLoc(), sizeChars);
      break;
    // T __atomic_fetch_and_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_fetch_and:
    case AtomicExpr::AO__atomic_fetch_and:
      LibCallName = "__atomic_fetch_and";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
                        E->getExprLoc(), sizeChars);
      break;
    // T __atomic_fetch_or_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_fetch_or:
    case AtomicExpr::AO__atomic_fetch_or:
      LibCallName = "__atomic_fetch_or";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
                        E->getExprLoc(), sizeChars);
      break;
    // T __atomic_fetch_sub_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_fetch_sub:
    case AtomicExpr::AO__atomic_fetch_sub:
      LibCallName = "__atomic_fetch_sub";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, LoweredMemTy,
                        E->getExprLoc(), sizeChars);
      break;
    // T __atomic_fetch_xor_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_fetch_xor:
    case AtomicExpr::AO__atomic_fetch_xor:
      LibCallName = "__atomic_fetch_xor";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
                        E->getExprLoc(), sizeChars);
      break;
    default: return EmitUnsupportedRValue(E, "atomic library call");
    }

    // Optimized functions have the size in their name.
    if (UseOptimizedLibcall)
      LibCallName += "_" + llvm::utostr(Size);
    // By default, assume we return a value of the atomic type.
    if (!HaveRetTy) {
      if (UseOptimizedLibcall) {
        // Value is returned directly.
        // The function returns an appropriately sized integer type.
        RetTy = getContext().getIntTypeForBitwidth(
            getContext().toBits(sizeChars), /*Signed=*/false);
      } else {
        // Value is returned through a parameter before the order.
        RetTy = getContext().VoidTy;
        Args.add(RValue::get(EmitCastToVoidPtr(Dest)),
                 getContext().VoidPtrTy);
      }
    }
    // The order is always the last parameter.
    Args.add(RValue::get(Order),
             getContext().IntTy);

    const CGFunctionInfo &FuncInfo =
        CGM.getTypes().arrangeFreeFunctionCall(RetTy, Args,
            FunctionType::ExtInfo(), RequiredArgs::All);
    llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FuncInfo);
    llvm::Constant *Func = CGM.CreateRuntimeFunction(FTy, LibCallName);
    RValue Res = EmitCall(FuncInfo, Func, ReturnValueSlot(), Args);
    if (!RetTy->isVoidType()) {
      if (UseOptimizedLibcall) {
        if (HaveRetTy)
          return Res;
        llvm::StoreInst *StoreDest = Builder.CreateStore(
            Res.getScalarVal(),
            Builder.CreateBitCast(Dest, FTy->getReturnType()->getPointerTo()));
        StoreDest->setAlignment(Align);
      }
    }
    if (E->getType()->isVoidType())
      return RValue::get(nullptr);
    return convertTempToRValue(Dest, E->getType(), E->getExprLoc());
  }
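
  // Naming sketch (assuming the GCC libatomic convention this follows):
  // an inline-ineligible 4-byte __atomic_fetch_add becomes a call to
  // __atomic_fetch_add_4, while an oddly sized (say 3-byte) atomic load
  // becomes the generic, size-prefixed form:
  //   __atomic_load(3, mem, ret, order)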

  bool IsStore = E->getOp() == AtomicExpr::AO__c11_atomic_store ||
                 E->getOp() == AtomicExpr::AO__atomic_store ||
                 E->getOp() == AtomicExpr::AO__atomic_store_n;
  bool IsLoad = E->getOp() == AtomicExpr::AO__c11_atomic_load ||
                E->getOp() == AtomicExpr::AO__atomic_load ||
                E->getOp() == AtomicExpr::AO__atomic_load_n;

  llvm::Type *IPtrTy =
      llvm::IntegerType::get(getLLVMContext(), Size * 8)->getPointerTo();
  llvm::Value *OrigDest = Dest;
  Ptr = Builder.CreateBitCast(Ptr, IPtrTy);
  if (Val1) Val1 = Builder.CreateBitCast(Val1, IPtrTy);
  if (Val2) Val2 = Builder.CreateBitCast(Val2, IPtrTy);
  if (Dest && !E->isCmpXChg()) Dest = Builder.CreateBitCast(Dest, IPtrTy);

  if (isa<llvm::ConstantInt>(Order)) {
    int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
    switch (ord) {
    case AtomicExpr::AO_ABI_memory_order_relaxed:
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
                   Size, Align, llvm::Monotonic);
      break;
    case AtomicExpr::AO_ABI_memory_order_consume:
    case AtomicExpr::AO_ABI_memory_order_acquire:
      if (IsStore)
        break; // Avoid crashing on code with undefined behavior
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
                   Size, Align, llvm::Acquire);
      break;
    case AtomicExpr::AO_ABI_memory_order_release:
      if (IsLoad)
        break; // Avoid crashing on code with undefined behavior
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
                   Size, Align, llvm::Release);
      break;
    case AtomicExpr::AO_ABI_memory_order_acq_rel:
      if (IsLoad || IsStore)
        break; // Avoid crashing on code with undefined behavior
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
                   Size, Align, llvm::AcquireRelease);
      break;
    case AtomicExpr::AO_ABI_memory_order_seq_cst:
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
                   Size, Align, llvm::SequentiallyConsistent);
      break;
    default: // invalid order
      // We should not ever get here normally, but it's hard to
      // enforce that in general.
      break;
    }
    if (E->getType()->isVoidType())
      return RValue::get(nullptr);
    return convertTempToRValue(OrigDest, E->getType(), E->getExprLoc());
  }

  // Long case, when Order isn't obviously constant.

  // Create all the relevant BBs.
  llvm::BasicBlock *MonotonicBB = nullptr, *AcquireBB = nullptr,
                   *ReleaseBB = nullptr, *AcqRelBB = nullptr,
                   *SeqCstBB = nullptr;
  MonotonicBB = createBasicBlock("monotonic", CurFn);
  if (!IsStore)
    AcquireBB = createBasicBlock("acquire", CurFn);
  if (!IsLoad)
    ReleaseBB = createBasicBlock("release", CurFn);
  if (!IsLoad && !IsStore)
    AcqRelBB = createBasicBlock("acqrel", CurFn);
  SeqCstBB = createBasicBlock("seqcst", CurFn);
  llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);

  // Create the switch for the split.
  // MonotonicBB is arbitrarily chosen as the default case; in practice, this
  // doesn't matter unless someone is crazy enough to use something that
  // doesn't fold to a constant for the ordering.
  Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
  llvm::SwitchInst *SI = Builder.CreateSwitch(Order, MonotonicBB);

  // Emit all the different atomics.
  Builder.SetInsertPoint(MonotonicBB);
  EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
               Size, Align, llvm::Monotonic);
  Builder.CreateBr(ContBB);
  if (!IsStore) {
    Builder.SetInsertPoint(AcquireBB);
    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
                 Size, Align, llvm::Acquire);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32(AtomicExpr::AO_ABI_memory_order_consume),
                AcquireBB);
    SI->addCase(Builder.getInt32(AtomicExpr::AO_ABI_memory_order_acquire),
                AcquireBB);
  }
  if (!IsLoad) {
    Builder.SetInsertPoint(ReleaseBB);
    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
                 Size, Align, llvm::Release);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32(AtomicExpr::AO_ABI_memory_order_release),
                ReleaseBB);
  }
  if (!IsLoad && !IsStore) {
    Builder.SetInsertPoint(AcqRelBB);
    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
                 Size, Align, llvm::AcquireRelease);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32(AtomicExpr::AO_ABI_memory_order_acq_rel),
                AcqRelBB);
  }
  Builder.SetInsertPoint(SeqCstBB);
  EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
               Size, Align, llvm::SequentiallyConsistent);
  Builder.CreateBr(ContBB);
  SI->addCase(Builder.getInt32(AtomicExpr::AO_ABI_memory_order_seq_cst),
              SeqCstBB);

  // Cleanup and return.
  Builder.SetInsertPoint(ContBB);
  if (E->getType()->isVoidType())
    return RValue::get(nullptr);
  return convertTempToRValue(OrigDest, E->getType(), E->getExprLoc());
}
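
// A hedged end-to-end example: compiling
//
//   void f(_Atomic(int) *p, int o) { atomic_load_explicit(p, o); }
//
// reaches the "long case" above, producing a switch on `o` with
// monotonic/acquire/seqcst blocks (release and acq_rel are omitted for
// a load) that each perform the load with the corresponding ordering.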

llvm::Value *AtomicInfo::emitCastToAtomicIntPointer(llvm::Value *addr) const {
  unsigned addrspace =
      cast<llvm::PointerType>(addr->getType())->getAddressSpace();
  llvm::IntegerType *ty =
      llvm::IntegerType::get(CGF.getLLVMContext(), AtomicSizeInBits);
  return CGF.Builder.CreateBitCast(addr, ty->getPointerTo(addrspace));
}

RValue AtomicInfo::convertTempToRValue(llvm::Value *addr,
                                       AggValueSlot resultSlot,
                                       SourceLocation loc) const {
  if (EvaluationKind == TEK_Aggregate)
    return resultSlot.asRValue();

  // Drill into the padding structure if we have one.
  if (hasPadding())
    addr = CGF.Builder.CreateStructGEP(addr, 0);

  // Otherwise, just convert the temporary to an r-value using the
  // normal conversion routine.
  return CGF.convertTempToRValue(addr, getValueType(), loc);
}

/// Emit a load from an l-value of atomic type.  Note that the r-value
/// we produce is an r-value of the atomic *value* type.
RValue CodeGenFunction::EmitAtomicLoad(LValue src, SourceLocation loc,
                                       AggValueSlot resultSlot) {
  AtomicInfo atomics(*this, src);

  // Check whether we should use a library call.
  if (atomics.shouldUseLibcall()) {
    llvm::Value *tempAddr;
    if (!resultSlot.isIgnored()) {
      assert(atomics.getEvaluationKind() == TEK_Aggregate);
      tempAddr = resultSlot.getAddr();
    } else {
      tempAddr = CreateMemTemp(atomics.getAtomicType(), "atomic-load-temp");
    }

    // void __atomic_load(size_t size, void *mem, void *return, int order);
    CallArgList args;
    args.add(RValue::get(atomics.getAtomicSizeValue()),
             getContext().getSizeType());
    args.add(RValue::get(EmitCastToVoidPtr(src.getAddress())),
             getContext().VoidPtrTy);
    args.add(RValue::get(EmitCastToVoidPtr(tempAddr)),
             getContext().VoidPtrTy);
    args.add(RValue::get(llvm::ConstantInt::get(
                 IntTy, AtomicExpr::AO_ABI_memory_order_seq_cst)),
             getContext().IntTy);
    emitAtomicLibcall(*this, "__atomic_load", getContext().VoidTy, args);

    // Produce the r-value.
    return atomics.convertTempToRValue(tempAddr, resultSlot, loc);
  }

  // Okay, we're doing this natively.
  llvm::Value *addr = atomics.emitCastToAtomicIntPointer(src.getAddress());
  llvm::LoadInst *load = Builder.CreateLoad(addr, "atomic-load");
  load->setAtomic(llvm::SequentiallyConsistent);

  // Other decoration.
  load->setAlignment(src.getAlignment().getQuantity());
  if (src.isVolatileQualified())
    load->setVolatile(true);
  if (src.getTBAAInfo())
    CGM.DecorateInstruction(load, src.getTBAAInfo());

  // Okay, turn that back into the original value type.
  QualType valueType = atomics.getValueType();
  llvm::Value *result = load;

  // If we're ignoring an aggregate return, don't do anything.
  if (atomics.getEvaluationKind() == TEK_Aggregate && resultSlot.isIgnored())
    return RValue::getAggregate(nullptr, false);

  // The easiest way to do this is to go through memory, but we
  // try not to in some easy cases.
  if (atomics.getEvaluationKind() == TEK_Scalar && !atomics.hasPadding()) {
    llvm::Type *resultTy = CGM.getTypes().ConvertTypeForMem(valueType);
    if (isa<llvm::IntegerType>(resultTy)) {
      assert(result->getType() == resultTy);
      result = EmitFromMemory(result, valueType);
    } else if (isa<llvm::PointerType>(resultTy)) {
      result = Builder.CreateIntToPtr(result, resultTy);
    } else {
      result = Builder.CreateBitCast(result, resultTy);
    }
    return RValue::get(result);
  }

  // Create a temporary.  This needs to be big enough to hold the
  // atomic integer.
  llvm::Value *temp;
  bool tempIsVolatile = false;
  CharUnits tempAlignment;
  if (atomics.getEvaluationKind() == TEK_Aggregate) {
    assert(!resultSlot.isIgnored());
    temp = resultSlot.getAddr();
    tempAlignment = atomics.getValueAlignment();
    tempIsVolatile = resultSlot.isVolatile();
  } else {
    temp = CreateMemTemp(atomics.getAtomicType(), "atomic-load-temp");
    tempAlignment = atomics.getAtomicAlignment();
  }

  // Slam the integer into the temporary.
  llvm::Value *castTemp = atomics.emitCastToAtomicIntPointer(temp);
  Builder.CreateAlignedStore(result, castTemp, tempAlignment.getQuantity())
      ->setVolatile(tempIsVolatile);

  return atomics.convertTempToRValue(temp, resultSlot, loc);
}
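
// Rough shape of the native path for a plain int (a sketch, assuming a
// lock-free target; value names are illustrative and use 3.5-era IR
// syntax):
//
//   %i = bitcast i32* %src to i32*           ; atomic int pointer cast
//   %v = load atomic i32* %i seq_cst, align 4
//
// The result is returned directly as a scalar, without a round trip
// through memory, because int has no atomic padding.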

/// Copy an r-value into memory as part of storing to an atomic type.
/// This needs to create a bit-pattern suitable for atomic operations.
void AtomicInfo::emitCopyIntoMemory(RValue rvalue, LValue dest) const {
  // If we have an r-value, the rvalue should be of the atomic type,
  // which means that the caller is responsible for having zeroed
  // any padding.  Just do an aggregate copy of that type.
  if (rvalue.isAggregate()) {
    CGF.EmitAggregateCopy(dest.getAddress(),
                          rvalue.getAggregateAddr(),
                          getAtomicType(),
                          (rvalue.isVolatileQualified()
                           || dest.isVolatileQualified()),
                          dest.getAlignment());
    return;
  }

  // Okay, otherwise we're copying stuff.

  // Zero out the buffer if necessary.
  emitMemSetZeroIfNecessary(dest);

  // Drill past the padding if present.
  dest = projectValue(dest);

  // Okay, store the rvalue in.
  if (rvalue.isScalar()) {
    CGF.EmitStoreOfScalar(rvalue.getScalarVal(), dest, /*init*/ true);
  } else {
    CGF.EmitStoreOfComplex(rvalue.getComplexVal(), dest, /*init*/ true);
  }
}

/// Materialize an r-value into memory for the purposes of storing it
/// to an atomic type.
llvm::Value *AtomicInfo::materializeRValue(RValue rvalue) const {
  // Aggregate r-values are already in memory, and EmitAtomicStore
  // requires them to be values of the atomic type.
  if (rvalue.isAggregate())
    return rvalue.getAggregateAddr();

  // Otherwise, make a temporary and materialize into it.
  llvm::Value *temp = CGF.CreateMemTemp(getAtomicType(), "atomic-store-temp");
  LValue tempLV = CGF.MakeAddrLValue(temp, getAtomicType(),
                                     getAtomicAlignment());
  emitCopyIntoMemory(rvalue, tempLV);
  return temp;
}

/// Emit a store to an l-value of atomic type.
///
/// Note that the r-value is expected to be an r-value *of the atomic
/// type*; this means that for aggregate r-values, it should include
/// storage for any padding that was necessary.
void CodeGenFunction::EmitAtomicStore(RValue rvalue, LValue dest, bool isInit) {
  // If this is an aggregate r-value, it should agree in type except
  // maybe for address-space qualification.
  assert(!rvalue.isAggregate() ||
         rvalue.getAggregateAddr()->getType()->getPointerElementType()
           == dest.getAddress()->getType()->getPointerElementType());

  AtomicInfo atomics(*this, dest);

  // If this is an initialization, just put the value there normally.
  if (isInit) {
    atomics.emitCopyIntoMemory(rvalue, dest);
    return;
  }

  // Check whether we should use a library call.
  if (atomics.shouldUseLibcall()) {
    // Produce a source address.
    llvm::Value *srcAddr = atomics.materializeRValue(rvalue);

    // void __atomic_store(size_t size, void *mem, void *val, int order)
    CallArgList args;
    args.add(RValue::get(atomics.getAtomicSizeValue()),
             getContext().getSizeType());
    args.add(RValue::get(EmitCastToVoidPtr(dest.getAddress())),
             getContext().VoidPtrTy);
    args.add(RValue::get(EmitCastToVoidPtr(srcAddr)),
             getContext().VoidPtrTy);
    args.add(RValue::get(llvm::ConstantInt::get(
                 IntTy, AtomicExpr::AO_ABI_memory_order_seq_cst)),
             getContext().IntTy);
    emitAtomicLibcall(*this, "__atomic_store", getContext().VoidTy, args);
    return;
  }

  // Okay, we're doing this natively.
  llvm::Value *intValue;

  // If we've got a scalar value of the right size, try to avoid going
  // through memory.
  if (rvalue.isScalar() && !atomics.hasPadding()) {
    llvm::Value *value = rvalue.getScalarVal();
    if (isa<llvm::IntegerType>(value->getType())) {
      intValue = value;
    } else {
      llvm::IntegerType *inputIntTy =
          llvm::IntegerType::get(getLLVMContext(),
                                 atomics.getValueSizeInBits());
      if (isa<llvm::PointerType>(value->getType())) {
        intValue = Builder.CreatePtrToInt(value, inputIntTy);
      } else {
        intValue = Builder.CreateBitCast(value, inputIntTy);
      }
    }

  // Otherwise, we need to go through memory.
  } else {
    // Put the r-value in memory.
    llvm::Value *addr = atomics.materializeRValue(rvalue);

    // Cast the temporary to the atomic int type and pull a value out.
    addr = atomics.emitCastToAtomicIntPointer(addr);
    intValue = Builder.CreateAlignedLoad(addr,
                                 atomics.getAtomicAlignment().getQuantity());
  }

  // Do the atomic store.
  llvm::Value *addr = atomics.emitCastToAtomicIntPointer(dest.getAddress());
  llvm::StoreInst *store = Builder.CreateStore(intValue, addr);

  // Initializations don't need to be atomic.
  if (!isInit) store->setAtomic(llvm::SequentiallyConsistent);

  // Other decoration.
  store->setAlignment(dest.getAlignment().getQuantity());
  if (dest.isVolatileQualified())
    store->setVolatile(true);
  if (dest.getTBAAInfo())
    CGM.DecorateInstruction(store, dest.getTBAAInfo());
}
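
// Hedged contrast of the two store paths: storing 42 into an aligned
// _Atomic(int) emits a single `store atomic i32 42, ... seq_cst`, while
// storing into an over-wide or under-aligned atomic materializes the
// value into a temporary and calls
// __atomic_store(size, &obj, &tmp, memory_order_seq_cst).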

void CodeGenFunction::EmitAtomicInit(Expr *init, LValue dest) {
  AtomicInfo atomics(*this, dest);

  switch (atomics.getEvaluationKind()) {
  case TEK_Scalar: {
    llvm::Value *value = EmitScalarExpr(init);
    atomics.emitCopyIntoMemory(RValue::get(value), dest);
    return;
  }

  case TEK_Complex: {
    ComplexPairTy value = EmitComplexExpr(init);
    atomics.emitCopyIntoMemory(RValue::getComplex(value), dest);
    return;
  }

  case TEK_Aggregate: {
    // Fix up the destination if the initializer isn't an expression
    // of atomic type.
    bool Zeroed = false;
    if (!init->getType()->isAtomicType()) {
      Zeroed = atomics.emitMemSetZeroIfNecessary(dest);
      dest = atomics.projectValue(dest);
    }

    // Evaluate the expression directly into the destination.
    AggValueSlot slot = AggValueSlot::forLValue(dest,
                                        AggValueSlot::IsNotDestructed,
                                        AggValueSlot::DoesNotNeedGCBarriers,
                                        AggValueSlot::IsNotAliased,
                                        Zeroed ? AggValueSlot::IsZeroed :
                                                 AggValueSlot::IsNotZeroed);

    EmitAggExpr(init, slot);
    return;
  }
  }
  llvm_unreachable("bad evaluation kind");
}