//===--- CGAtomic.cpp - Emit LLVM IR for atomic operations ----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the code for emitting atomic operations.
//
//===----------------------------------------------------------------------===//

#include "CodeGenFunction.h"
#include "CGCall.h"
#include "CodeGenModule.h"
#include "clang/AST/ASTContext.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Operator.h"

using namespace clang;
using namespace CodeGen;

namespace {
  class AtomicInfo {
    CodeGenFunction &CGF;
    QualType AtomicTy;
    QualType ValueTy;
    uint64_t AtomicSizeInBits;
    uint64_t ValueSizeInBits;
    CharUnits AtomicAlign;
    CharUnits ValueAlign;
    CharUnits LValueAlign;
    TypeEvaluationKind EvaluationKind;
    bool UseLibcall;
  public:
    AtomicInfo(CodeGenFunction &CGF, LValue &lvalue) : CGF(CGF) {
      assert(lvalue.isSimple());

      AtomicTy = lvalue.getType();
      ValueTy = AtomicTy->castAs<AtomicType>()->getValueType();
      EvaluationKind = CGF.getEvaluationKind(ValueTy);

      ASTContext &C = CGF.getContext();

      uint64_t ValueAlignInBits;
      uint64_t AtomicAlignInBits;
      TypeInfo ValueTI = C.getTypeInfo(ValueTy);
      ValueSizeInBits = ValueTI.Width;
      ValueAlignInBits = ValueTI.Align;

      TypeInfo AtomicTI = C.getTypeInfo(AtomicTy);
      AtomicSizeInBits = AtomicTI.Width;
      AtomicAlignInBits = AtomicTI.Align;

      assert(ValueSizeInBits <= AtomicSizeInBits);
      assert(ValueAlignInBits <= AtomicAlignInBits);

      AtomicAlign = C.toCharUnitsFromBits(AtomicAlignInBits);
      ValueAlign = C.toCharUnitsFromBits(ValueAlignInBits);
      if (lvalue.getAlignment().isZero())
        lvalue.setAlignment(AtomicAlign);

      UseLibcall =
          (AtomicSizeInBits > uint64_t(C.toBits(lvalue.getAlignment())) ||
           AtomicSizeInBits > C.getTargetInfo().getMaxAtomicInlineWidth());
    }

    QualType getAtomicType() const { return AtomicTy; }
    QualType getValueType() const { return ValueTy; }
    CharUnits getAtomicAlignment() const { return AtomicAlign; }
    CharUnits getValueAlignment() const { return ValueAlign; }
    uint64_t getAtomicSizeInBits() const { return AtomicSizeInBits; }
    uint64_t getValueSizeInBits() const { return ValueSizeInBits; }
    TypeEvaluationKind getEvaluationKind() const { return EvaluationKind; }
    bool shouldUseLibcall() const { return UseLibcall; }

    /// Is the atomic size larger than the underlying value type?
    ///
    /// Note that the absence of padding does not mean that atomic
    /// objects are completely interchangeable with non-atomic
    /// objects: we might have promoted the alignment of a type
    /// without making it bigger.
    bool hasPadding() const {
      return (ValueSizeInBits != AtomicSizeInBits);
    }

    bool emitMemSetZeroIfNecessary(LValue dest) const;

    llvm::Value *getAtomicSizeValue() const {
      CharUnits size = CGF.getContext().toCharUnitsFromBits(AtomicSizeInBits);
      return CGF.CGM.getSize(size);
    }

    /// Cast the given pointer to an integer pointer suitable for
    /// atomic operations.
    llvm::Value *emitCastToAtomicIntPointer(llvm::Value *addr) const;

    /// Turn an atomic-layout object into an r-value.
    RValue convertTempToRValue(llvm::Value *addr,
                               AggValueSlot resultSlot,
                               SourceLocation loc) const;

    /// Copy an atomic r-value into atomic-layout memory.
    void emitCopyIntoMemory(RValue rvalue, LValue lvalue) const;

    /// Project an l-value down to the value field.
    LValue projectValue(LValue lvalue) const {
      llvm::Value *addr = lvalue.getAddress();
      if (hasPadding())
        addr = CGF.Builder.CreateStructGEP(addr, 0);

      return LValue::MakeAddr(addr, getValueType(), lvalue.getAlignment(),
                              CGF.getContext(), lvalue.getTBAAInfo());
    }

    /// Materialize an atomic r-value in atomic-layout memory.
    llvm::Value *materializeRValue(RValue rvalue) const;

  private:
    bool requiresMemSetZero(llvm::Type *type) const;
  };
}
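
// Illustrative note (not part of the original source): AtomicInfo decides
// between inline instructions and libcalls purely from size and alignment.
// For example, assuming a target whose MaxAtomicInlineWidth is 64 bits,
//   struct Big { char bytes[16]; };
//   _Atomic(struct Big) b;          // AtomicSizeInBits == 128
// makes shouldUseLibcall() return true, so accesses to 'b' are lowered to
// __atomic_* runtime calls rather than native atomic instructions.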

static RValue emitAtomicLibcall(CodeGenFunction &CGF,
                                StringRef fnName,
                                QualType resultType,
                                CallArgList &args) {
  const CGFunctionInfo &fnInfo =
      CGF.CGM.getTypes().arrangeFreeFunctionCall(resultType, args,
          FunctionType::ExtInfo(), RequiredArgs::All);
  llvm::FunctionType *fnTy = CGF.CGM.getTypes().GetFunctionType(fnInfo);
  llvm::Constant *fn = CGF.CGM.CreateRuntimeFunction(fnTy, fnName);
  return CGF.EmitCall(fnInfo, fn, ReturnValueSlot(), args);
}
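
// For illustration only (a sketch, not from the original file): with the
// generic runtime interface documented at the GCCMM page referenced later in
// this file, emitAtomicLibcall for "__atomic_load" ends up emitting a call
// roughly like
//   call void @__atomic_load(i64 %size, i8* %obj, i8* %ret, i32 5)
// where the final i32 is the C11 memory order (5 == memory_order_seq_cst).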

/// Does a store of the given IR type modify the full expected width?
static bool isFullSizeType(CodeGenModule &CGM, llvm::Type *type,
                           uint64_t expectedSize) {
  return (CGM.getDataLayout().getTypeStoreSize(type) * 8 == expectedSize);
}

/// Does the atomic type require memsetting to zero before initialization?
///
/// The IR type is provided as a way of making certain queries faster.
bool AtomicInfo::requiresMemSetZero(llvm::Type *type) const {
  // If the atomic type has size padding, we definitely need a memset.
  if (hasPadding()) return true;

  // Otherwise, do some simple heuristics to try to avoid it:
  switch (getEvaluationKind()) {
  // For scalars and complexes, check whether the store size of the
  // type uses the full size.
  case TEK_Scalar:
    return !isFullSizeType(CGF.CGM, type, AtomicSizeInBits);
  case TEK_Complex:
    return !isFullSizeType(CGF.CGM, type->getStructElementType(0),
                           AtomicSizeInBits / 2);

  // Padding in structs has an undefined bit pattern.  User beware.
  case TEK_Aggregate:
    return false;
  }
  llvm_unreachable("bad evaluation kind");
}

bool AtomicInfo::emitMemSetZeroIfNecessary(LValue dest) const {
  llvm::Value *addr = dest.getAddress();
  if (!requiresMemSetZero(addr->getType()->getPointerElementType()))
    return false;

  CGF.Builder.CreateMemSet(addr, llvm::ConstantInt::get(CGF.Int8Ty, 0),
                           AtomicSizeInBits / 8,
                           dest.getAlignment().getQuantity());
  return true;
}

static void emitAtomicCmpXchg(CodeGenFunction &CGF, AtomicExpr *E, bool IsWeak,
                              llvm::Value *Dest, llvm::Value *Ptr,
                              llvm::Value *Val1, llvm::Value *Val2,
                              uint64_t Size, unsigned Align,
                              llvm::AtomicOrdering SuccessOrder,
                              llvm::AtomicOrdering FailureOrder) {
  // Load the 'expected' and 'desired' values so the cmpxchg can use them.
  llvm::LoadInst *Expected = CGF.Builder.CreateLoad(Val1);
  Expected->setAlignment(Align);
  llvm::LoadInst *Desired = CGF.Builder.CreateLoad(Val2);
  Desired->setAlignment(Align);

  llvm::AtomicCmpXchgInst *Pair = CGF.Builder.CreateAtomicCmpXchg(
      Ptr, Expected, Desired, SuccessOrder, FailureOrder);
  Pair->setVolatile(E->isVolatile());
  Pair->setWeak(IsWeak);

  // Cmp holds the result of the compare-exchange operation: true on success,
  // false on failure.
  llvm::Value *Old = CGF.Builder.CreateExtractValue(Pair, 0);
  llvm::Value *Cmp = CGF.Builder.CreateExtractValue(Pair, 1);

  // This basic block is used to hold the store instruction if the operation
  // failed.
  llvm::BasicBlock *StoreExpectedBB =
      CGF.createBasicBlock("cmpxchg.store_expected", CGF.CurFn);

  // This basic block is the exit point of the operation, we should end up
  // here regardless of whether or not the operation succeeded.
  llvm::BasicBlock *ContinueBB =
      CGF.createBasicBlock("cmpxchg.continue", CGF.CurFn);

  // Update Expected if Expected isn't equal to Old, otherwise branch to the
  // exit point.
  CGF.Builder.CreateCondBr(Cmp, ContinueBB, StoreExpectedBB);

  CGF.Builder.SetInsertPoint(StoreExpectedBB);
  // Update the memory at Expected with Old's value.
  llvm::StoreInst *StoreExpected = CGF.Builder.CreateStore(Old, Val1);
  StoreExpected->setAlignment(Align);
  // Finally, branch to the exit point.
  CGF.Builder.CreateBr(ContinueBB);

  CGF.Builder.SetInsertPoint(ContinueBB);
  // Update the memory at Dest with Cmp's value.
  CGF.EmitStoreOfScalar(Cmp, CGF.MakeAddrLValue(Dest, E->getType()));
  return;
}
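
// Rough shape of the IR produced above (illustrative sketch only, for a
// 32-bit operand with seq_cst success / acquire failure orderings):
//   %pair = cmpxchg i32* %ptr, i32 %expected, i32 %desired seq_cst acquire
//   %old  = extractvalue { i32, i1 } %pair, 0
//   %ok   = extractvalue { i32, i1 } %pair, 1
//   br i1 %ok, label %cmpxchg.continue, label %cmpxchg.store_expected
// cmpxchg.store_expected:                ; failure: write %old back to Val1
//   store i32 %old, i32* %expected.addr
//   br label %cmpxchg.continue
// cmpxchg.continue:                      ; %ok becomes the boolean result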

/// Given an ordering required on success, emit all possible cmpxchg
/// instructions to cope with the provided (but possibly only dynamically known)
/// FailureOrder.
static void emitAtomicCmpXchgFailureSet(CodeGenFunction &CGF, AtomicExpr *E,
                                        bool IsWeak, llvm::Value *Dest,
                                        llvm::Value *Ptr, llvm::Value *Val1,
                                        llvm::Value *Val2,
                                        llvm::Value *FailureOrderVal,
                                        uint64_t Size, unsigned Align,
                                        llvm::AtomicOrdering SuccessOrder) {
  llvm::AtomicOrdering FailureOrder;
  if (llvm::ConstantInt *FO = dyn_cast<llvm::ConstantInt>(FailureOrderVal)) {
    switch (FO->getSExtValue()) {
    default:
      FailureOrder = llvm::Monotonic;
      break;
    case AtomicExpr::AO_ABI_memory_order_consume:
    case AtomicExpr::AO_ABI_memory_order_acquire:
      FailureOrder = llvm::Acquire;
      break;
    case AtomicExpr::AO_ABI_memory_order_seq_cst:
      FailureOrder = llvm::SequentiallyConsistent;
      break;
    }
    if (FailureOrder >= SuccessOrder) {
      // Don't assert on undefined behaviour.
      FailureOrder =
          llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(SuccessOrder);
    }
    emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, Size, Align,
                      SuccessOrder, FailureOrder);
    return;
  }

  // Create all the relevant BB's
  llvm::BasicBlock *MonotonicBB = nullptr, *AcquireBB = nullptr,
                   *SeqCstBB = nullptr;
  MonotonicBB = CGF.createBasicBlock("monotonic_fail", CGF.CurFn);
  if (SuccessOrder != llvm::Monotonic && SuccessOrder != llvm::Release)
    AcquireBB = CGF.createBasicBlock("acquire_fail", CGF.CurFn);
  if (SuccessOrder == llvm::SequentiallyConsistent)
    SeqCstBB = CGF.createBasicBlock("seqcst_fail", CGF.CurFn);

  llvm::BasicBlock *ContBB = CGF.createBasicBlock("atomic.continue", CGF.CurFn);

  llvm::SwitchInst *SI = CGF.Builder.CreateSwitch(FailureOrderVal, MonotonicBB);

  // Emit all the different atomics

  // MonotonicBB is arbitrarily chosen as the default case; in practice, this
  // doesn't matter unless someone is crazy enough to use something that
  // doesn't fold to a constant for the ordering.
  CGF.Builder.SetInsertPoint(MonotonicBB);
  emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2,
                    Size, Align, SuccessOrder, llvm::Monotonic);
  CGF.Builder.CreateBr(ContBB);

  if (AcquireBB) {
    CGF.Builder.SetInsertPoint(AcquireBB);
    emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2,
                      Size, Align, SuccessOrder, llvm::Acquire);
    CGF.Builder.CreateBr(ContBB);
    SI->addCase(CGF.Builder.getInt32(AtomicExpr::AO_ABI_memory_order_consume),
                AcquireBB);
    SI->addCase(CGF.Builder.getInt32(AtomicExpr::AO_ABI_memory_order_acquire),
                AcquireBB);
  }
  if (SeqCstBB) {
    CGF.Builder.SetInsertPoint(SeqCstBB);
    emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2,
                      Size, Align, SuccessOrder, llvm::SequentiallyConsistent);
    CGF.Builder.CreateBr(ContBB);
    SI->addCase(CGF.Builder.getInt32(AtomicExpr::AO_ABI_memory_order_seq_cst),
                SeqCstBB);
  }

  CGF.Builder.SetInsertPoint(ContBB);
}
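
// Illustrative trigger for the dynamic-failure-order path above (sketch):
//   int fail = pick_order();   // hypothetical helper, not a constant
//   __c11_atomic_compare_exchange_strong(&obj, &expected, desired,
//                                        memory_order_seq_cst, fail);
// Because 'fail' does not fold to a ConstantInt, a switch over the possible
// failure orderings (monotonic / acquire / seq_cst) is emitted, with every
// arm rejoining at atomic.continue.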

static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, llvm::Value *Dest,
                         llvm::Value *Ptr, llvm::Value *Val1, llvm::Value *Val2,
                         llvm::Value *IsWeak, llvm::Value *FailureOrder,
                         uint64_t Size, unsigned Align,
                         llvm::AtomicOrdering Order) {
  llvm::AtomicRMWInst::BinOp Op = llvm::AtomicRMWInst::Add;
  llvm::Instruction::BinaryOps PostOp = (llvm::Instruction::BinaryOps)0;

  switch (E->getOp()) {
  case AtomicExpr::AO__c11_atomic_init:
    llvm_unreachable("Already handled!");

  case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
    emitAtomicCmpXchgFailureSet(CGF, E, false, Dest, Ptr, Val1, Val2,
                                FailureOrder, Size, Align, Order);
    return;
  case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
    emitAtomicCmpXchgFailureSet(CGF, E, true, Dest, Ptr, Val1, Val2,
                                FailureOrder, Size, Align, Order);
    return;
  case AtomicExpr::AO__atomic_compare_exchange:
  case AtomicExpr::AO__atomic_compare_exchange_n: {
    if (llvm::ConstantInt *IsWeakC = dyn_cast<llvm::ConstantInt>(IsWeak)) {
      emitAtomicCmpXchgFailureSet(CGF, E, IsWeakC->getZExtValue(), Dest, Ptr,
                                  Val1, Val2, FailureOrder, Size, Align, Order);
    } else {
      // Create all the relevant BB's
      llvm::BasicBlock *StrongBB =
          CGF.createBasicBlock("cmpxchg.strong", CGF.CurFn);
      llvm::BasicBlock *WeakBB = CGF.createBasicBlock("cmpxchg.weak", CGF.CurFn);
      llvm::BasicBlock *ContBB =
          CGF.createBasicBlock("cmpxchg.continue", CGF.CurFn);

      llvm::SwitchInst *SI = CGF.Builder.CreateSwitch(IsWeak, WeakBB);
      SI->addCase(CGF.Builder.getInt1(false), StrongBB);

      CGF.Builder.SetInsertPoint(StrongBB);
      emitAtomicCmpXchgFailureSet(CGF, E, false, Dest, Ptr, Val1, Val2,
                                  FailureOrder, Size, Align, Order);
      CGF.Builder.CreateBr(ContBB);

      CGF.Builder.SetInsertPoint(WeakBB);
      emitAtomicCmpXchgFailureSet(CGF, E, true, Dest, Ptr, Val1, Val2,
                                  FailureOrder, Size, Align, Order);
      CGF.Builder.CreateBr(ContBB);

      CGF.Builder.SetInsertPoint(ContBB);
    }
    return;
  }
  case AtomicExpr::AO__c11_atomic_load:
  case AtomicExpr::AO__atomic_load_n:
  case AtomicExpr::AO__atomic_load: {
    llvm::LoadInst *Load = CGF.Builder.CreateLoad(Ptr);
    Load->setAtomic(Order);
    Load->setAlignment(Size);
    Load->setVolatile(E->isVolatile());
    llvm::StoreInst *StoreDest = CGF.Builder.CreateStore(Load, Dest);
    StoreDest->setAlignment(Align);
    return;
  }

  case AtomicExpr::AO__c11_atomic_store:
  case AtomicExpr::AO__atomic_store:
  case AtomicExpr::AO__atomic_store_n: {
    assert(!Dest && "Store does not return a value");
    llvm::LoadInst *LoadVal1 = CGF.Builder.CreateLoad(Val1);
    LoadVal1->setAlignment(Align);
    llvm::StoreInst *Store = CGF.Builder.CreateStore(LoadVal1, Ptr);
    Store->setAtomic(Order);
    Store->setAlignment(Size);
    Store->setVolatile(E->isVolatile());
    return;
  }

  case AtomicExpr::AO__c11_atomic_exchange:
  case AtomicExpr::AO__atomic_exchange_n:
  case AtomicExpr::AO__atomic_exchange:
    Op = llvm::AtomicRMWInst::Xchg;
    break;

  case AtomicExpr::AO__atomic_add_fetch:
    PostOp = llvm::Instruction::Add;
    // Fall through.
  case AtomicExpr::AO__c11_atomic_fetch_add:
  case AtomicExpr::AO__atomic_fetch_add:
    Op = llvm::AtomicRMWInst::Add;
    break;

  case AtomicExpr::AO__atomic_sub_fetch:
    PostOp = llvm::Instruction::Sub;
    // Fall through.
  case AtomicExpr::AO__c11_atomic_fetch_sub:
  case AtomicExpr::AO__atomic_fetch_sub:
    Op = llvm::AtomicRMWInst::Sub;
    break;

  case AtomicExpr::AO__atomic_and_fetch:
    PostOp = llvm::Instruction::And;
    // Fall through.
  case AtomicExpr::AO__c11_atomic_fetch_and:
  case AtomicExpr::AO__atomic_fetch_and:
    Op = llvm::AtomicRMWInst::And;
    break;

  case AtomicExpr::AO__atomic_or_fetch:
    PostOp = llvm::Instruction::Or;
    // Fall through.
  case AtomicExpr::AO__c11_atomic_fetch_or:
  case AtomicExpr::AO__atomic_fetch_or:
    Op = llvm::AtomicRMWInst::Or;
    break;

  case AtomicExpr::AO__atomic_xor_fetch:
    PostOp = llvm::Instruction::Xor;
    // Fall through.
  case AtomicExpr::AO__c11_atomic_fetch_xor:
  case AtomicExpr::AO__atomic_fetch_xor:
    Op = llvm::AtomicRMWInst::Xor;
    break;

  case AtomicExpr::AO__atomic_nand_fetch:
    PostOp = llvm::Instruction::And;
    // Fall through.
  case AtomicExpr::AO__atomic_fetch_nand:
    Op = llvm::AtomicRMWInst::Nand;
    break;
  }

  llvm::LoadInst *LoadVal1 = CGF.Builder.CreateLoad(Val1);
  LoadVal1->setAlignment(Align);
  llvm::AtomicRMWInst *RMWI =
      CGF.Builder.CreateAtomicRMW(Op, Ptr, LoadVal1, Order);
  RMWI->setVolatile(E->isVolatile());

  // For __atomic_*_fetch operations, perform the operation again to
  // determine the value which was written.
  llvm::Value *Result = RMWI;
  if (PostOp)
    Result = CGF.Builder.CreateBinOp(PostOp, RMWI, LoadVal1);
  if (E->getOp() == AtomicExpr::AO__atomic_nand_fetch)
    Result = CGF.Builder.CreateNot(Result);
  llvm::StoreInst *StoreDest = CGF.Builder.CreateStore(Result, Dest);
  StoreDest->setAlignment(Align);
}
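
// Example of the atomicrmw + PostOp path above (illustrative sketch): for
//   __atomic_add_fetch(&counter, 1, __ATOMIC_SEQ_CST)
// the IR is roughly
//   %old = atomicrmw add i32* %counter, i32 1 seq_cst
//   %new = add i32 %old, 1        ; PostOp recomputes the value written
// and %new (not %old) is what gets stored to Dest.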

// This function emits any expression (scalar, complex, or aggregate)
// into a temporary alloca.
static llvm::Value *
EmitValToTemp(CodeGenFunction &CGF, Expr *E) {
  llvm::Value *DeclPtr = CGF.CreateMemTemp(E->getType(), ".atomictmp");
  CGF.EmitAnyExprToMem(E, DeclPtr, E->getType().getQualifiers(),
                       /*Init*/ true);
  return DeclPtr;
}

static void
AddDirectArgument(CodeGenFunction &CGF, CallArgList &Args,
                  bool UseOptimizedLibcall, llvm::Value *Val, QualType ValTy,
                  SourceLocation Loc, CharUnits SizeInChars) {
  if (UseOptimizedLibcall) {
    // Load value and pass it to the function directly.
    unsigned Align = CGF.getContext().getTypeAlignInChars(ValTy).getQuantity();
    int64_t SizeInBits = CGF.getContext().toBits(SizeInChars);
    ValTy =
        CGF.getContext().getIntTypeForBitwidth(SizeInBits, /*Signed=*/false);
    llvm::Type *IPtrTy = llvm::IntegerType::get(CGF.getLLVMContext(),
                                                SizeInBits)->getPointerTo();
    Val = CGF.EmitLoadOfScalar(CGF.Builder.CreateBitCast(Val, IPtrTy), false,
                               Align, CGF.getContext().getPointerType(ValTy),
                               Loc);
    // Coerce the value into an appropriately sized integer type.
    Args.add(RValue::get(Val), ValTy);
  } else {
    // Non-optimized functions always take a reference.
    Args.add(RValue::get(CGF.EmitCastToVoidPtr(Val)),
             CGF.getContext().VoidPtrTy);
  }
}
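
// Illustrative contrast between the two paths above (sketch): for a 4-byte
// operand the optimized library function takes the value itself,
//   i32 @__atomic_exchange_4(i8* %obj, i32 %val, i32 %order)
// while the generic one takes everything by reference,
//   void @__atomic_exchange(i64 %size, i8* %obj, i8* %val, i8* %ret, i32 %order)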
488
John McCallfc207f22013-03-07 21:37:12 +0000489RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) {
490 QualType AtomicTy = E->getPtr()->getType()->getPointeeType();
491 QualType MemTy = AtomicTy;
492 if (const AtomicType *AT = AtomicTy->getAs<AtomicType>())
493 MemTy = AT->getValueType();
494 CharUnits sizeChars = getContext().getTypeSizeInChars(AtomicTy);
495 uint64_t Size = sizeChars.getQuantity();
496 CharUnits alignChars = getContext().getTypeAlignInChars(AtomicTy);
497 unsigned Align = alignChars.getQuantity();
498 unsigned MaxInlineWidthInBits =
John McCallc8e01702013-04-16 22:48:15 +0000499 getTarget().getMaxAtomicInlineWidth();
John McCallfc207f22013-03-07 21:37:12 +0000500 bool UseLibcall = (Size != Align ||
501 getContext().toBits(sizeChars) > MaxInlineWidthInBits);
502
Tim Northovercadbbe12014-06-13 19:43:04 +0000503 llvm::Value *IsWeak = nullptr, *OrderFail = nullptr, *Val1 = nullptr,
504 *Val2 = nullptr;
Craig Topper8a13c412014-05-21 05:09:00 +0000505 llvm::Value *Ptr = EmitScalarExpr(E->getPtr());
John McCallfc207f22013-03-07 21:37:12 +0000506
507 if (E->getOp() == AtomicExpr::AO__c11_atomic_init) {
508 assert(!Dest && "Init does not return a value");
John McCalla8ec7eb2013-03-07 21:37:17 +0000509 LValue lvalue = LValue::MakeAddr(Ptr, AtomicTy, alignChars, getContext());
510 EmitAtomicInit(E->getVal1(), lvalue);
Craig Topper8a13c412014-05-21 05:09:00 +0000511 return RValue::get(nullptr);
John McCallfc207f22013-03-07 21:37:12 +0000512 }
513
Craig Topper8a13c412014-05-21 05:09:00 +0000514 llvm::Value *Order = EmitScalarExpr(E->getOrder());
John McCallfc207f22013-03-07 21:37:12 +0000515
516 switch (E->getOp()) {
517 case AtomicExpr::AO__c11_atomic_init:
518 llvm_unreachable("Already handled!");
519
520 case AtomicExpr::AO__c11_atomic_load:
521 case AtomicExpr::AO__atomic_load_n:
522 break;
523
524 case AtomicExpr::AO__atomic_load:
525 Dest = EmitScalarExpr(E->getVal1());
526 break;
527
528 case AtomicExpr::AO__atomic_store:
529 Val1 = EmitScalarExpr(E->getVal1());
530 break;
531
532 case AtomicExpr::AO__atomic_exchange:
533 Val1 = EmitScalarExpr(E->getVal1());
534 Dest = EmitScalarExpr(E->getVal2());
535 break;
536
537 case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
538 case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
539 case AtomicExpr::AO__atomic_compare_exchange_n:
540 case AtomicExpr::AO__atomic_compare_exchange:
541 Val1 = EmitScalarExpr(E->getVal1());
542 if (E->getOp() == AtomicExpr::AO__atomic_compare_exchange)
543 Val2 = EmitScalarExpr(E->getVal2());
544 else
545 Val2 = EmitValToTemp(*this, E->getVal2());
546 OrderFail = EmitScalarExpr(E->getOrderFail());
John McCallfc207f22013-03-07 21:37:12 +0000547 if (E->getNumSubExprs() == 6)
Tim Northovercadbbe12014-06-13 19:43:04 +0000548 IsWeak = EmitScalarExpr(E->getWeak());
John McCallfc207f22013-03-07 21:37:12 +0000549 break;
550
551 case AtomicExpr::AO__c11_atomic_fetch_add:
552 case AtomicExpr::AO__c11_atomic_fetch_sub:
553 if (MemTy->isPointerType()) {
554 // For pointer arithmetic, we're required to do a bit of math:
555 // adding 1 to an int* is not the same as adding 1 to a uintptr_t.
556 // ... but only for the C11 builtins. The GNU builtins expect the
557 // user to multiply by sizeof(T).
558 QualType Val1Ty = E->getVal1()->getType();
559 llvm::Value *Val1Scalar = EmitScalarExpr(E->getVal1());
560 CharUnits PointeeIncAmt =
561 getContext().getTypeSizeInChars(MemTy->getPointeeType());
562 Val1Scalar = Builder.CreateMul(Val1Scalar, CGM.getSize(PointeeIncAmt));
563 Val1 = CreateMemTemp(Val1Ty, ".atomictmp");
564 EmitStoreOfScalar(Val1Scalar, MakeAddrLValue(Val1, Val1Ty));
565 break;
566 }
567 // Fall through.
568 case AtomicExpr::AO__atomic_fetch_add:
569 case AtomicExpr::AO__atomic_fetch_sub:
570 case AtomicExpr::AO__atomic_add_fetch:
571 case AtomicExpr::AO__atomic_sub_fetch:
572 case AtomicExpr::AO__c11_atomic_store:
573 case AtomicExpr::AO__c11_atomic_exchange:
574 case AtomicExpr::AO__atomic_store_n:
575 case AtomicExpr::AO__atomic_exchange_n:
576 case AtomicExpr::AO__c11_atomic_fetch_and:
577 case AtomicExpr::AO__c11_atomic_fetch_or:
578 case AtomicExpr::AO__c11_atomic_fetch_xor:
579 case AtomicExpr::AO__atomic_fetch_and:
580 case AtomicExpr::AO__atomic_fetch_or:
581 case AtomicExpr::AO__atomic_fetch_xor:
582 case AtomicExpr::AO__atomic_fetch_nand:
583 case AtomicExpr::AO__atomic_and_fetch:
584 case AtomicExpr::AO__atomic_or_fetch:
585 case AtomicExpr::AO__atomic_xor_fetch:
586 case AtomicExpr::AO__atomic_nand_fetch:
587 Val1 = EmitValToTemp(*this, E->getVal1());
588 break;
589 }
590
David Majnemeree8d04d2014-12-12 08:16:09 +0000591 QualType RValTy = E->getType().getUnqualifiedType();
592
David Majnemer659be552014-11-25 23:44:32 +0000593 auto GetDest = [&] {
David Majnemeree8d04d2014-12-12 08:16:09 +0000594 if (!RValTy->isVoidType() && !Dest) {
595 Dest = CreateMemTemp(RValTy, ".atomicdst");
596 }
David Majnemer659be552014-11-25 23:44:32 +0000597 return Dest;
598 };
John McCallfc207f22013-03-07 21:37:12 +0000599
600 // Use a library call. See: http://gcc.gnu.org/wiki/Atomic/GCCMM/LIbrary .
601 if (UseLibcall) {
Ed Schoutenc7e82bd2013-05-31 19:27:59 +0000602 bool UseOptimizedLibcall = false;
603 switch (E->getOp()) {
604 case AtomicExpr::AO__c11_atomic_fetch_add:
605 case AtomicExpr::AO__atomic_fetch_add:
606 case AtomicExpr::AO__c11_atomic_fetch_and:
607 case AtomicExpr::AO__atomic_fetch_and:
608 case AtomicExpr::AO__c11_atomic_fetch_or:
609 case AtomicExpr::AO__atomic_fetch_or:
610 case AtomicExpr::AO__c11_atomic_fetch_sub:
611 case AtomicExpr::AO__atomic_fetch_sub:
612 case AtomicExpr::AO__c11_atomic_fetch_xor:
613 case AtomicExpr::AO__atomic_fetch_xor:
614 // For these, only library calls for certain sizes exist.
615 UseOptimizedLibcall = true;
616 break;
617 default:
618 // Only use optimized library calls for sizes for which they exist.
619 if (Size == 1 || Size == 2 || Size == 4 || Size == 8)
620 UseOptimizedLibcall = true;
621 break;
622 }
John McCallfc207f22013-03-07 21:37:12 +0000623
John McCallfc207f22013-03-07 21:37:12 +0000624 CallArgList Args;
Ed Schoutenc7e82bd2013-05-31 19:27:59 +0000625 if (!UseOptimizedLibcall) {
626 // For non-optimized library calls, the size is the first parameter
627 Args.add(RValue::get(llvm::ConstantInt::get(SizeTy, Size)),
628 getContext().getSizeType());
629 }
630 // Atomic address is the first or second parameter
Nick Lewycky5fa40c32013-10-01 21:51:38 +0000631 Args.add(RValue::get(EmitCastToVoidPtr(Ptr)), getContext().VoidPtrTy);
John McCallfc207f22013-03-07 21:37:12 +0000632
Ed Schoutenc7e82bd2013-05-31 19:27:59 +0000633 std::string LibCallName;
Logan Chien74798a32014-03-26 17:35:01 +0000634 QualType LoweredMemTy =
635 MemTy->isPointerType() ? getContext().getIntPtrType() : MemTy;
Ed Schoutenc7e82bd2013-05-31 19:27:59 +0000636 QualType RetTy;
637 bool HaveRetTy = false;
John McCallfc207f22013-03-07 21:37:12 +0000638 switch (E->getOp()) {
    // There is only one libcall for compare and exchange, because there is no
    // optimisation benefit possible from a libcall version of a weak compare
    // and exchange.
    // bool __atomic_compare_exchange(size_t size, void *mem, void *expected,
    //                                void *desired, int success, int failure)
    // bool __atomic_compare_exchange_N(T *mem, T *expected, T desired,
    //                                  int success, int failure)
    case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
    case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
    case AtomicExpr::AO__atomic_compare_exchange:
    case AtomicExpr::AO__atomic_compare_exchange_n:
      LibCallName = "__atomic_compare_exchange";
      RetTy = getContext().BoolTy;
      HaveRetTy = true;
      Args.add(RValue::get(EmitCastToVoidPtr(Val1)), getContext().VoidPtrTy);
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val2, MemTy,
                        E->getExprLoc(), sizeChars);
      Args.add(RValue::get(Order), getContext().IntTy);
      Order = OrderFail;
      break;
    // void __atomic_exchange(size_t size, void *mem, void *val, void *return,
    //                        int order)
    // T __atomic_exchange_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_exchange:
    case AtomicExpr::AO__atomic_exchange_n:
    case AtomicExpr::AO__atomic_exchange:
      LibCallName = "__atomic_exchange";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
                        E->getExprLoc(), sizeChars);
      break;
    // void __atomic_store(size_t size, void *mem, void *val, int order)
    // void __atomic_store_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_store:
    case AtomicExpr::AO__atomic_store:
    case AtomicExpr::AO__atomic_store_n:
      LibCallName = "__atomic_store";
      RetTy = getContext().VoidTy;
      HaveRetTy = true;
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
                        E->getExprLoc(), sizeChars);
      break;
    // void __atomic_load(size_t size, void *mem, void *return, int order)
    // T __atomic_load_N(T *mem, int order)
    case AtomicExpr::AO__c11_atomic_load:
    case AtomicExpr::AO__atomic_load:
    case AtomicExpr::AO__atomic_load_n:
      LibCallName = "__atomic_load";
      break;
    // T __atomic_fetch_add_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_fetch_add:
    case AtomicExpr::AO__atomic_fetch_add:
      LibCallName = "__atomic_fetch_add";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, LoweredMemTy,
                        E->getExprLoc(), sizeChars);
      break;
    // T __atomic_fetch_and_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_fetch_and:
    case AtomicExpr::AO__atomic_fetch_and:
      LibCallName = "__atomic_fetch_and";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
                        E->getExprLoc(), sizeChars);
      break;
    // T __atomic_fetch_or_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_fetch_or:
    case AtomicExpr::AO__atomic_fetch_or:
      LibCallName = "__atomic_fetch_or";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
                        E->getExprLoc(), sizeChars);
      break;
    // T __atomic_fetch_sub_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_fetch_sub:
    case AtomicExpr::AO__atomic_fetch_sub:
      LibCallName = "__atomic_fetch_sub";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, LoweredMemTy,
                        E->getExprLoc(), sizeChars);
      break;
    // T __atomic_fetch_xor_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_fetch_xor:
    case AtomicExpr::AO__atomic_fetch_xor:
      LibCallName = "__atomic_fetch_xor";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
                        E->getExprLoc(), sizeChars);
      break;
    default: return EmitUnsupportedRValue(E, "atomic library call");
    }

    // Optimized functions have the size in their name.
    if (UseOptimizedLibcall)
      LibCallName += "_" + llvm::utostr(Size);
    // By default, assume we return a value of the atomic type.
    if (!HaveRetTy) {
      if (UseOptimizedLibcall) {
        // Value is returned directly.
        // The function returns an appropriately sized integer type.
        RetTy = getContext().getIntTypeForBitwidth(
            getContext().toBits(sizeChars), /*Signed=*/false);
      } else {
        // Value is returned through parameter before the order.
        RetTy = getContext().VoidTy;
        Args.add(RValue::get(EmitCastToVoidPtr(Dest)), getContext().VoidPtrTy);
      }
    }
    // order is always the last parameter
    Args.add(RValue::get(Order),
             getContext().IntTy);

    RValue Res = emitAtomicLibcall(*this, LibCallName, RetTy, Args);
    // The value is returned directly from the libcall.
    if (HaveRetTy && !RetTy->isVoidType())
      return Res;
    // The value is returned via an explicit out param.
    if (RetTy->isVoidType())
      return RValue::get(nullptr);
    // The value is returned directly for optimized libcalls, but the caller
    // expects an out-param.
    if (UseOptimizedLibcall) {
      llvm::Value *ResVal = Res.getScalarVal();
      llvm::StoreInst *StoreDest = Builder.CreateStore(
          ResVal,
          Builder.CreateBitCast(GetDest(), ResVal->getType()->getPointerTo()));
      StoreDest->setAlignment(Align);
    }
    return convertTempToRValue(Dest, RValTy, E->getExprLoc());
  }

  bool IsStore = E->getOp() == AtomicExpr::AO__c11_atomic_store ||
                 E->getOp() == AtomicExpr::AO__atomic_store ||
                 E->getOp() == AtomicExpr::AO__atomic_store_n;
  bool IsLoad = E->getOp() == AtomicExpr::AO__c11_atomic_load ||
                E->getOp() == AtomicExpr::AO__atomic_load ||
                E->getOp() == AtomicExpr::AO__atomic_load_n;

  llvm::Type *ITy =
      llvm::IntegerType::get(getLLVMContext(), Size * 8);
  llvm::Value *OrigDest = GetDest();
  Ptr = Builder.CreateBitCast(
      Ptr, ITy->getPointerTo(Ptr->getType()->getPointerAddressSpace()));
  if (Val1) Val1 = Builder.CreateBitCast(Val1, ITy->getPointerTo());
  if (Val2) Val2 = Builder.CreateBitCast(Val2, ITy->getPointerTo());
  if (Dest && !E->isCmpXChg())
    Dest = Builder.CreateBitCast(Dest, ITy->getPointerTo());

  if (isa<llvm::ConstantInt>(Order)) {
    int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
    switch (ord) {
    case AtomicExpr::AO_ABI_memory_order_relaxed:
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
                   Size, Align, llvm::Monotonic);
      break;
    case AtomicExpr::AO_ABI_memory_order_consume:
    case AtomicExpr::AO_ABI_memory_order_acquire:
      if (IsStore)
        break; // Avoid crashing on code with undefined behavior
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
                   Size, Align, llvm::Acquire);
      break;
    case AtomicExpr::AO_ABI_memory_order_release:
      if (IsLoad)
        break; // Avoid crashing on code with undefined behavior
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
                   Size, Align, llvm::Release);
      break;
    case AtomicExpr::AO_ABI_memory_order_acq_rel:
      if (IsLoad || IsStore)
        break; // Avoid crashing on code with undefined behavior
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
                   Size, Align, llvm::AcquireRelease);
      break;
    case AtomicExpr::AO_ABI_memory_order_seq_cst:
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
                   Size, Align, llvm::SequentiallyConsistent);
      break;
    default: // invalid order
      // We should not ever get here normally, but it's hard to
      // enforce that in general.
      break;
    }
    if (RValTy->isVoidType())
      return RValue::get(nullptr);
    return convertTempToRValue(OrigDest, RValTy, E->getExprLoc());
  }

  // Long case, when Order isn't obviously constant.

  // Create all the relevant BB's
  llvm::BasicBlock *MonotonicBB = nullptr, *AcquireBB = nullptr,
                   *ReleaseBB = nullptr, *AcqRelBB = nullptr,
                   *SeqCstBB = nullptr;
  MonotonicBB = createBasicBlock("monotonic", CurFn);
  if (!IsStore)
    AcquireBB = createBasicBlock("acquire", CurFn);
  if (!IsLoad)
    ReleaseBB = createBasicBlock("release", CurFn);
  if (!IsLoad && !IsStore)
    AcqRelBB = createBasicBlock("acqrel", CurFn);
  SeqCstBB = createBasicBlock("seqcst", CurFn);
  llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);

  // Create the switch for the split
  // MonotonicBB is arbitrarily chosen as the default case; in practice, this
  // doesn't matter unless someone is crazy enough to use something that
  // doesn't fold to a constant for the ordering.
  Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
  llvm::SwitchInst *SI = Builder.CreateSwitch(Order, MonotonicBB);

  // Emit all the different atomics
  Builder.SetInsertPoint(MonotonicBB);
  EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
               Size, Align, llvm::Monotonic);
  Builder.CreateBr(ContBB);
  if (!IsStore) {
    Builder.SetInsertPoint(AcquireBB);
    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
                 Size, Align, llvm::Acquire);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32(AtomicExpr::AO_ABI_memory_order_consume),
                AcquireBB);
    SI->addCase(Builder.getInt32(AtomicExpr::AO_ABI_memory_order_acquire),
                AcquireBB);
  }
  if (!IsLoad) {
    Builder.SetInsertPoint(ReleaseBB);
    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
                 Size, Align, llvm::Release);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32(AtomicExpr::AO_ABI_memory_order_release),
                ReleaseBB);
  }
  if (!IsLoad && !IsStore) {
    Builder.SetInsertPoint(AcqRelBB);
    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
                 Size, Align, llvm::AcquireRelease);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32(AtomicExpr::AO_ABI_memory_order_acq_rel),
                AcqRelBB);
  }
  Builder.SetInsertPoint(SeqCstBB);
  EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
               Size, Align, llvm::SequentiallyConsistent);
  Builder.CreateBr(ContBB);
  SI->addCase(Builder.getInt32(AtomicExpr::AO_ABI_memory_order_seq_cst),
              SeqCstBB);

  // Cleanup and return
  Builder.SetInsertPoint(ContBB);
  if (RValTy->isVoidType())
    return RValue::get(nullptr);
  return convertTempToRValue(OrigDest, RValTy, E->getExprLoc());
}
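
// Illustrative example of the "long case" above (a sketch): for
//   __c11_atomic_store(&a, 1, order)   // 'order' is not a constant
// IsStore is true, so only the monotonic, release and seq_cst blocks are
// created; the switch on 'order' defaults to monotonic, each block performs
// the store with the matching llvm::AtomicOrdering, and all of them branch
// to atomic.continue.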

llvm::Value *AtomicInfo::emitCastToAtomicIntPointer(llvm::Value *addr) const {
  unsigned addrspace =
      cast<llvm::PointerType>(addr->getType())->getAddressSpace();
  llvm::IntegerType *ty =
      llvm::IntegerType::get(CGF.getLLVMContext(), AtomicSizeInBits);
  return CGF.Builder.CreateBitCast(addr, ty->getPointerTo(addrspace));
}

RValue AtomicInfo::convertTempToRValue(llvm::Value *addr,
                                       AggValueSlot resultSlot,
                                       SourceLocation loc) const {
  if (EvaluationKind == TEK_Aggregate)
    return resultSlot.asRValue();

  // Drill into the padding structure if we have one.
  if (hasPadding())
    addr = CGF.Builder.CreateStructGEP(addr, 0);

  // Otherwise, just convert the temporary to an r-value using the
  // normal conversion routine.
  return CGF.convertTempToRValue(addr, getValueType(), loc);
}

/// Emit a load from an l-value of atomic type.  Note that the r-value
/// we produce is an r-value of the atomic *value* type.
RValue CodeGenFunction::EmitAtomicLoad(LValue src, SourceLocation loc,
                                       AggValueSlot resultSlot) {
  AtomicInfo atomics(*this, src);

  // Check whether we should use a library call.
  if (atomics.shouldUseLibcall()) {
    llvm::Value *tempAddr;
    if (!resultSlot.isIgnored()) {
      assert(atomics.getEvaluationKind() == TEK_Aggregate);
      tempAddr = resultSlot.getAddr();
    } else {
      tempAddr = CreateMemTemp(atomics.getAtomicType(), "atomic-load-temp");
    }

    // void __atomic_load(size_t size, void *mem, void *return, int order);
    CallArgList args;
    args.add(RValue::get(atomics.getAtomicSizeValue()),
             getContext().getSizeType());
    args.add(RValue::get(EmitCastToVoidPtr(src.getAddress())),
             getContext().VoidPtrTy);
    args.add(RValue::get(EmitCastToVoidPtr(tempAddr)),
             getContext().VoidPtrTy);
    args.add(RValue::get(llvm::ConstantInt::get(
                 IntTy, AtomicExpr::AO_ABI_memory_order_seq_cst)),
             getContext().IntTy);
    emitAtomicLibcall(*this, "__atomic_load", getContext().VoidTy, args);

    // Produce the r-value.
    return atomics.convertTempToRValue(tempAddr, resultSlot, loc);
  }

  // Okay, we're doing this natively.
  llvm::Value *addr = atomics.emitCastToAtomicIntPointer(src.getAddress());
  llvm::LoadInst *load = Builder.CreateLoad(addr, "atomic-load");
  load->setAtomic(llvm::SequentiallyConsistent);

  // Other decoration.
  load->setAlignment(src.getAlignment().getQuantity());
  if (src.isVolatileQualified())
    load->setVolatile(true);
  if (src.getTBAAInfo())
    CGM.DecorateInstruction(load, src.getTBAAInfo());

  // Okay, turn that back into the original value type.
  QualType valueType = atomics.getValueType();
  llvm::Value *result = load;

  // If we're ignoring an aggregate return, don't do anything.
  if (atomics.getEvaluationKind() == TEK_Aggregate && resultSlot.isIgnored())
    return RValue::getAggregate(nullptr, false);

  // The easiest way to do this is to go through memory, but we
  // try not to in some easy cases.
  if (atomics.getEvaluationKind() == TEK_Scalar && !atomics.hasPadding()) {
    llvm::Type *resultTy = CGM.getTypes().ConvertTypeForMem(valueType);
    if (isa<llvm::IntegerType>(resultTy)) {
      assert(result->getType() == resultTy);
      result = EmitFromMemory(result, valueType);
    } else if (isa<llvm::PointerType>(resultTy)) {
      result = Builder.CreateIntToPtr(result, resultTy);
    } else {
      result = Builder.CreateBitCast(result, resultTy);
    }
    return RValue::get(result);
  }

  // Create a temporary.  This needs to be big enough to hold the
  // atomic integer.
  llvm::Value *temp;
  bool tempIsVolatile = false;
  CharUnits tempAlignment;
  if (atomics.getEvaluationKind() == TEK_Aggregate) {
    assert(!resultSlot.isIgnored());
    temp = resultSlot.getAddr();
    tempAlignment = atomics.getValueAlignment();
    tempIsVolatile = resultSlot.isVolatile();
  } else {
    temp = CreateMemTemp(atomics.getAtomicType(), "atomic-load-temp");
    tempAlignment = atomics.getAtomicAlignment();
  }

  // Slam the integer into the temporary.
  llvm::Value *castTemp = atomics.emitCastToAtomicIntPointer(temp);
  Builder.CreateAlignedStore(result, castTemp, tempAlignment.getQuantity())
      ->setVolatile(tempIsVolatile);

  return atomics.convertTempToRValue(temp, resultSlot, loc);
}
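
// Illustrative result of the native path above (sketch): loading from an
// _Atomic(float) lvalue that fits in a native atomic produces roughly
//   %1 = load atomic i32* %addr seq_cst, align 4
//   %2 = bitcast i32 %1 to float
// i.e. the value is loaded through the padded integer type and converted
// back, and memory is only used when no direct conversion exists.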



/// Copy an r-value into memory as part of storing to an atomic type.
/// This needs to create a bit-pattern suitable for atomic operations.
void AtomicInfo::emitCopyIntoMemory(RValue rvalue, LValue dest) const {
  // If we have an r-value, the rvalue should be of the atomic type,
  // which means that the caller is responsible for having zeroed
  // any padding.  Just do an aggregate copy of that type.
  if (rvalue.isAggregate()) {
    CGF.EmitAggregateCopy(dest.getAddress(),
                          rvalue.getAggregateAddr(),
                          getAtomicType(),
                          (rvalue.isVolatileQualified()
                           || dest.isVolatileQualified()),
                          dest.getAlignment());
    return;
  }

  // Okay, otherwise we're copying stuff.

  // Zero out the buffer if necessary.
  emitMemSetZeroIfNecessary(dest);

  // Drill past the padding if present.
  dest = projectValue(dest);

  // Okay, store the rvalue in.
  if (rvalue.isScalar()) {
    CGF.EmitStoreOfScalar(rvalue.getScalarVal(), dest, /*init*/ true);
  } else {
    CGF.EmitStoreOfComplex(rvalue.getComplexVal(), dest, /*init*/ true);
  }
}


/// Materialize an r-value into memory for the purposes of storing it
/// to an atomic type.
llvm::Value *AtomicInfo::materializeRValue(RValue rvalue) const {
  // Aggregate r-values are already in memory, and EmitAtomicStore
  // requires them to be values of the atomic type.
  if (rvalue.isAggregate())
    return rvalue.getAggregateAddr();

  // Otherwise, make a temporary and materialize into it.
  llvm::Value *temp = CGF.CreateMemTemp(getAtomicType(), "atomic-store-temp");
  LValue tempLV = CGF.MakeAddrLValue(temp, getAtomicType(), getAtomicAlignment());
  emitCopyIntoMemory(rvalue, tempLV);
  return temp;
}

/// Emit a store to an l-value of atomic type.
///
/// Note that the r-value is expected to be an r-value *of the atomic
/// type*; this means that for aggregate r-values, it should include
/// storage for any padding that was necessary.
void CodeGenFunction::EmitAtomicStore(RValue rvalue, LValue dest, bool isInit) {
  // If this is an aggregate r-value, it should agree in type except
  // maybe for address-space qualification.
  assert(!rvalue.isAggregate() ||
         rvalue.getAggregateAddr()->getType()->getPointerElementType()
           == dest.getAddress()->getType()->getPointerElementType());

  AtomicInfo atomics(*this, dest);

  // If this is an initialization, just put the value there normally.
  if (isInit) {
    atomics.emitCopyIntoMemory(rvalue, dest);
    return;
  }

  // Check whether we should use a library call.
  if (atomics.shouldUseLibcall()) {
    // Produce a source address.
    llvm::Value *srcAddr = atomics.materializeRValue(rvalue);

    // void __atomic_store(size_t size, void *mem, void *val, int order)
    CallArgList args;
    args.add(RValue::get(atomics.getAtomicSizeValue()),
             getContext().getSizeType());
    args.add(RValue::get(EmitCastToVoidPtr(dest.getAddress())),
             getContext().VoidPtrTy);
    args.add(RValue::get(EmitCastToVoidPtr(srcAddr)),
             getContext().VoidPtrTy);
    args.add(RValue::get(llvm::ConstantInt::get(
                 IntTy, AtomicExpr::AO_ABI_memory_order_seq_cst)),
             getContext().IntTy);
    emitAtomicLibcall(*this, "__atomic_store", getContext().VoidTy, args);
    return;
  }

  // Okay, we're doing this natively.
  llvm::Value *intValue;

  // If we've got a scalar value of the right size, try to avoid going
  // through memory.
  if (rvalue.isScalar() && !atomics.hasPadding()) {
    llvm::Value *value = rvalue.getScalarVal();
    if (isa<llvm::IntegerType>(value->getType())) {
      intValue = value;
    } else {
      llvm::IntegerType *inputIntTy =
          llvm::IntegerType::get(getLLVMContext(), atomics.getValueSizeInBits());
      if (isa<llvm::PointerType>(value->getType())) {
        intValue = Builder.CreatePtrToInt(value, inputIntTy);
      } else {
        intValue = Builder.CreateBitCast(value, inputIntTy);
      }
    }

  // Otherwise, we need to go through memory.
  } else {
    // Put the r-value in memory.
    llvm::Value *addr = atomics.materializeRValue(rvalue);

    // Cast the temporary to the atomic int type and pull a value out.
    addr = atomics.emitCastToAtomicIntPointer(addr);
    intValue = Builder.CreateAlignedLoad(addr,
                                         atomics.getAtomicAlignment().getQuantity());
  }

  // Do the atomic store.
  llvm::Value *addr = atomics.emitCastToAtomicIntPointer(dest.getAddress());
  llvm::StoreInst *store = Builder.CreateStore(intValue, addr);

  // Initializations don't need to be atomic.
  if (!isInit) store->setAtomic(llvm::SequentiallyConsistent);

  // Other decoration.
  store->setAlignment(dest.getAlignment().getQuantity());
  if (dest.isVolatileQualified())
    store->setVolatile(true);
  if (dest.getTBAAInfo())
    CGM.DecorateInstruction(store, dest.getTBAAInfo());
}
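
// Illustrative counterpart of the load path (sketch): storing a float to an
// _Atomic(float) lvalue bitcasts the value to i32 and emits roughly
//   store atomic i32 %val.int, i32* %addr seq_cst, align 4
// falling back to materializeRValue plus an integer load for values that
// cannot be converted directly.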

void CodeGenFunction::EmitAtomicInit(Expr *init, LValue dest) {
  AtomicInfo atomics(*this, dest);

  switch (atomics.getEvaluationKind()) {
  case TEK_Scalar: {
    llvm::Value *value = EmitScalarExpr(init);
    atomics.emitCopyIntoMemory(RValue::get(value), dest);
    return;
  }

  case TEK_Complex: {
    ComplexPairTy value = EmitComplexExpr(init);
    atomics.emitCopyIntoMemory(RValue::getComplex(value), dest);
    return;
  }

  case TEK_Aggregate: {
    // Fix up the destination if the initializer isn't an expression
    // of atomic type.
    bool Zeroed = false;
    if (!init->getType()->isAtomicType()) {
      Zeroed = atomics.emitMemSetZeroIfNecessary(dest);
      dest = atomics.projectValue(dest);
    }

    // Evaluate the expression directly into the destination.
    AggValueSlot slot = AggValueSlot::forLValue(dest,
                                        AggValueSlot::IsNotDestructed,
                                        AggValueSlot::DoesNotNeedGCBarriers,
                                        AggValueSlot::IsNotAliased,
                                        Zeroed ? AggValueSlot::IsZeroed :
                                                 AggValueSlot::IsNotZeroed);

    EmitAggExpr(init, slot);
    return;
  }
  }
  llvm_unreachable("bad evaluation kind");
}
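
// Example of the TEK_Aggregate path in EmitAtomicInit above (illustrative):
//   _Atomic(struct Pair) p = (struct Pair){1, 2};
// When the initializer is not itself of atomic type, any padding introduced
// by the atomic representation is zeroed first, the lvalue is projected down
// to the value field, and the aggregate is then evaluated directly in place.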