//===--- CGAtomic.cpp - Emit LLVM IR for atomic operations ----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the code for emitting atomic operations.
//
//===----------------------------------------------------------------------===//

#include "CodeGenFunction.h"
#include "CGCall.h"
#include "CodeGenModule.h"
#include "clang/AST/ASTContext.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Operator.h"

using namespace clang;
using namespace CodeGen;

namespace {
  class AtomicInfo {
    CodeGenFunction &CGF;
    QualType AtomicTy;
    QualType ValueTy;
    uint64_t AtomicSizeInBits;
    uint64_t ValueSizeInBits;
    CharUnits AtomicAlign;
    CharUnits ValueAlign;
    CharUnits LValueAlign;
    TypeEvaluationKind EvaluationKind;
    bool UseLibcall;
  public:
    AtomicInfo(CodeGenFunction &CGF, LValue &lvalue) : CGF(CGF) {
      assert(lvalue.isSimple());

      AtomicTy = lvalue.getType();
      ValueTy = AtomicTy->castAs<AtomicType>()->getValueType();
      EvaluationKind = CGF.getEvaluationKind(ValueTy);

      ASTContext &C = CGF.getContext();

      uint64_t valueAlignInBits;
      std::tie(ValueSizeInBits, valueAlignInBits) = C.getTypeInfo(ValueTy);

      uint64_t atomicAlignInBits;
      std::tie(AtomicSizeInBits, atomicAlignInBits) = C.getTypeInfo(AtomicTy);

      assert(ValueSizeInBits <= AtomicSizeInBits);
      assert(valueAlignInBits <= atomicAlignInBits);

      AtomicAlign = C.toCharUnitsFromBits(atomicAlignInBits);
      ValueAlign = C.toCharUnitsFromBits(valueAlignInBits);
      if (lvalue.getAlignment().isZero())
        lvalue.setAlignment(AtomicAlign);
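
      // Use a runtime library call if the atomic is underaligned for its
      // size, or wider than the target can operate on lock-free.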
      UseLibcall =
        (AtomicSizeInBits > uint64_t(C.toBits(lvalue.getAlignment())) ||
         AtomicSizeInBits > C.getTargetInfo().getMaxAtomicInlineWidth());
    }

    QualType getAtomicType() const { return AtomicTy; }
    QualType getValueType() const { return ValueTy; }
    CharUnits getAtomicAlignment() const { return AtomicAlign; }
    CharUnits getValueAlignment() const { return ValueAlign; }
    uint64_t getAtomicSizeInBits() const { return AtomicSizeInBits; }
    uint64_t getValueSizeInBits() const { return ValueSizeInBits; }
    TypeEvaluationKind getEvaluationKind() const { return EvaluationKind; }
    bool shouldUseLibcall() const { return UseLibcall; }

    /// Is the atomic size larger than the underlying value type?
    ///
    /// Note that the absence of padding does not mean that atomic
    /// objects are completely interchangeable with non-atomic
    /// objects: we might have promoted the alignment of a type
    /// without making it bigger.
    bool hasPadding() const {
      return (ValueSizeInBits != AtomicSizeInBits);
    }

    bool emitMemSetZeroIfNecessary(LValue dest) const;

    llvm::Value *getAtomicSizeValue() const {
      CharUnits size = CGF.getContext().toCharUnitsFromBits(AtomicSizeInBits);
      return CGF.CGM.getSize(size);
    }

    /// Cast the given pointer to an integer pointer suitable for
    /// atomic operations.
    llvm::Value *emitCastToAtomicIntPointer(llvm::Value *addr) const;

    /// Turn an atomic-layout object into an r-value.
    RValue convertTempToRValue(llvm::Value *addr,
                               AggValueSlot resultSlot,
                               SourceLocation loc) const;

    /// Copy an atomic r-value into atomic-layout memory.
    void emitCopyIntoMemory(RValue rvalue, LValue lvalue) const;

    /// Project an l-value down to the value field.
    LValue projectValue(LValue lvalue) const {
      llvm::Value *addr = lvalue.getAddress();
      if (hasPadding())
        addr = CGF.Builder.CreateStructGEP(addr, 0);

      return LValue::MakeAddr(addr, getValueType(), lvalue.getAlignment(),
                              CGF.getContext(), lvalue.getTBAAInfo());
    }

    /// Materialize an atomic r-value in atomic-layout memory.
    llvm::Value *materializeRValue(RValue rvalue) const;

  private:
    bool requiresMemSetZero(llvm::Type *type) const;
  };
}

static RValue emitAtomicLibcall(CodeGenFunction &CGF,
                                StringRef fnName,
                                QualType resultType,
                                CallArgList &args) {
  const CGFunctionInfo &fnInfo =
      CGF.CGM.getTypes().arrangeFreeFunctionCall(resultType, args,
          FunctionType::ExtInfo(), RequiredArgs::All);
  llvm::FunctionType *fnTy = CGF.CGM.getTypes().GetFunctionType(fnInfo);
  llvm::Constant *fn = CGF.CGM.CreateRuntimeFunction(fnTy, fnName);
  return CGF.EmitCall(fnInfo, fn, ReturnValueSlot(), args);
}

/// Does a store of the given IR type modify the full expected width?
static bool isFullSizeType(CodeGenModule &CGM, llvm::Type *type,
                           uint64_t expectedSize) {
  return (CGM.getDataLayout().getTypeStoreSize(type) * 8 == expectedSize);
}

/// Does the atomic type require memsetting to zero before initialization?
///
/// The IR type is provided as a way of making certain queries faster.
bool AtomicInfo::requiresMemSetZero(llvm::Type *type) const {
  // If the atomic type has size padding, we definitely need a memset.
  if (hasPadding()) return true;

  // Otherwise, do some simple heuristics to try to avoid it:
  switch (getEvaluationKind()) {
  // For scalars and complexes, check whether the store size of the
  // type uses the full size.
  case TEK_Scalar:
    return !isFullSizeType(CGF.CGM, type, AtomicSizeInBits);
  case TEK_Complex:
    return !isFullSizeType(CGF.CGM, type->getStructElementType(0),
                           AtomicSizeInBits / 2);

  // Padding in structs has an undefined bit pattern.  User beware.
  case TEK_Aggregate:
    return false;
  }
  llvm_unreachable("bad evaluation kind");
}

bool AtomicInfo::emitMemSetZeroIfNecessary(LValue dest) const {
  llvm::Value *addr = dest.getAddress();
  if (!requiresMemSetZero(addr->getType()->getPointerElementType()))
    return false;

  CGF.Builder.CreateMemSet(addr, llvm::ConstantInt::get(CGF.Int8Ty, 0),
                           AtomicSizeInBits / 8,
                           dest.getAlignment().getQuantity());
  return true;
}

static void emitAtomicCmpXchg(CodeGenFunction &CGF, AtomicExpr *E,
                              llvm::Value *Dest, llvm::Value *Ptr,
                              llvm::Value *Val1, llvm::Value *Val2,
                              uint64_t Size, unsigned Align,
                              llvm::AtomicOrdering SuccessOrder,
                              llvm::AtomicOrdering FailureOrder) {
  // Note that the LLVM cmpxchg instruction doesn't support a weak form,
  // at least at the moment.
  llvm::LoadInst *Expected = CGF.Builder.CreateLoad(Val1);
  Expected->setAlignment(Align);
  llvm::LoadInst *Desired = CGF.Builder.CreateLoad(Val2);
  Desired->setAlignment(Align);

  llvm::AtomicCmpXchgInst *Old = CGF.Builder.CreateAtomicCmpXchg(
      Ptr, Expected, Desired, SuccessOrder, FailureOrder);
  Old->setVolatile(E->isVolatile());

  // Cmp holds the result of the compare-exchange operation: true on success,
  // false on failure.
  llvm::Value *Cmp = CGF.Builder.CreateICmpEQ(Old, Expected);

  // This basic block is used to hold the store instruction if the operation
  // failed.
  llvm::BasicBlock *StoreExpectedBB =
      CGF.createBasicBlock("cmpxchg.store_expected", CGF.CurFn);

  // This basic block is the exit point of the operation; we should end up
  // here regardless of whether or not the operation succeeded.
  llvm::BasicBlock *ContinueBB =
      CGF.createBasicBlock("cmpxchg.continue", CGF.CurFn);

  // Update Expected if Expected isn't equal to Old, otherwise branch to the
  // exit point.
  CGF.Builder.CreateCondBr(Cmp, ContinueBB, StoreExpectedBB);

  CGF.Builder.SetInsertPoint(StoreExpectedBB);
  // Update the memory at Expected with Old's value.
  llvm::StoreInst *StoreExpected = CGF.Builder.CreateStore(Old, Val1);
  StoreExpected->setAlignment(Align);
  // Finally, branch to the exit point.
  CGF.Builder.CreateBr(ContinueBB);

  CGF.Builder.SetInsertPoint(ContinueBB);
  // Update the memory at Dest with Cmp's value.
  CGF.EmitStoreOfScalar(Cmp, CGF.MakeAddrLValue(Dest, E->getType()));
  return;
}

/// Given an ordering required on success, emit all possible cmpxchg
/// instructions to cope with the provided (but possibly only dynamically
/// known) FailureOrder.
static void emitAtomicCmpXchgFailureSet(CodeGenFunction &CGF, AtomicExpr *E,
                                        llvm::Value *Dest, llvm::Value *Ptr,
                                        llvm::Value *Val1, llvm::Value *Val2,
                                        llvm::Value *FailureOrderVal,
                                        uint64_t Size, unsigned Align,
                                        llvm::AtomicOrdering SuccessOrder) {
  llvm::AtomicOrdering FailureOrder;
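  // If the failure ordering is a compile-time constant, map it directly to
  // an LLVM ordering and emit a single cmpxchg with that pair of orderings.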
  if (llvm::ConstantInt *FO = dyn_cast<llvm::ConstantInt>(FailureOrderVal)) {
    switch (FO->getSExtValue()) {
    default:
      FailureOrder = llvm::Monotonic;
      break;
    case AtomicExpr::AO_ABI_memory_order_consume:
    case AtomicExpr::AO_ABI_memory_order_acquire:
      FailureOrder = llvm::Acquire;
      break;
    case AtomicExpr::AO_ABI_memory_order_seq_cst:
      FailureOrder = llvm::SequentiallyConsistent;
      break;
    }
    if (FailureOrder >= SuccessOrder) {
      // Don't assert on undefined behaviour.
      FailureOrder =
        llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(SuccessOrder);
    }
    emitAtomicCmpXchg(CGF, E, Dest, Ptr, Val1, Val2, Size, Align, SuccessOrder,
                      FailureOrder);
    return;
  }

  // Create all the relevant BB's
  llvm::BasicBlock *MonotonicBB = nullptr, *AcquireBB = nullptr,
                   *SeqCstBB = nullptr;
  MonotonicBB = CGF.createBasicBlock("monotonic_fail", CGF.CurFn);
  if (SuccessOrder != llvm::Monotonic && SuccessOrder != llvm::Release)
    AcquireBB = CGF.createBasicBlock("acquire_fail", CGF.CurFn);
  if (SuccessOrder == llvm::SequentiallyConsistent)
    SeqCstBB = CGF.createBasicBlock("seqcst_fail", CGF.CurFn);

  llvm::BasicBlock *ContBB = CGF.createBasicBlock("atomic.continue", CGF.CurFn);

  llvm::SwitchInst *SI = CGF.Builder.CreateSwitch(FailureOrderVal, MonotonicBB);

  // Emit all the different atomics

  // MonotonicBB is arbitrarily chosen as the default case; in practice, this
  // doesn't matter unless someone is crazy enough to use something that
  // doesn't fold to a constant for the ordering.
  CGF.Builder.SetInsertPoint(MonotonicBB);
  emitAtomicCmpXchg(CGF, E, Dest, Ptr, Val1, Val2,
                    Size, Align, SuccessOrder, llvm::Monotonic);
  CGF.Builder.CreateBr(ContBB);

  if (AcquireBB) {
    CGF.Builder.SetInsertPoint(AcquireBB);
    emitAtomicCmpXchg(CGF, E, Dest, Ptr, Val1, Val2,
                      Size, Align, SuccessOrder, llvm::Acquire);
    CGF.Builder.CreateBr(ContBB);
    SI->addCase(CGF.Builder.getInt32(AtomicExpr::AO_ABI_memory_order_consume),
                AcquireBB);
    SI->addCase(CGF.Builder.getInt32(AtomicExpr::AO_ABI_memory_order_acquire),
                AcquireBB);
  }
  if (SeqCstBB) {
    CGF.Builder.SetInsertPoint(SeqCstBB);
    emitAtomicCmpXchg(CGF, E, Dest, Ptr, Val1, Val2,
                      Size, Align, SuccessOrder, llvm::SequentiallyConsistent);
    CGF.Builder.CreateBr(ContBB);
    SI->addCase(CGF.Builder.getInt32(AtomicExpr::AO_ABI_memory_order_seq_cst),
                SeqCstBB);
  }

  CGF.Builder.SetInsertPoint(ContBB);
}

static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, llvm::Value *Dest,
                         llvm::Value *Ptr, llvm::Value *Val1, llvm::Value *Val2,
                         llvm::Value *FailureOrder, uint64_t Size,
                         unsigned Align, llvm::AtomicOrdering Order) {
  llvm::AtomicRMWInst::BinOp Op = llvm::AtomicRMWInst::Add;
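  // For the __atomic_*_fetch forms, PostOp records the binary operation
  // that must be re-applied to the atomicrmw result to produce the
  // post-operation value.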
  llvm::Instruction::BinaryOps PostOp = (llvm::Instruction::BinaryOps)0;

  switch (E->getOp()) {
  case AtomicExpr::AO__c11_atomic_init:
    llvm_unreachable("Already handled!");

  case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
  case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
  case AtomicExpr::AO__atomic_compare_exchange:
  case AtomicExpr::AO__atomic_compare_exchange_n:
    emitAtomicCmpXchgFailureSet(CGF, E, Dest, Ptr, Val1, Val2, FailureOrder,
                                Size, Align, Order);
    return;
  case AtomicExpr::AO__c11_atomic_load:
  case AtomicExpr::AO__atomic_load_n:
  case AtomicExpr::AO__atomic_load: {
    llvm::LoadInst *Load = CGF.Builder.CreateLoad(Ptr);
    Load->setAtomic(Order);
    Load->setAlignment(Size);
    Load->setVolatile(E->isVolatile());
    llvm::StoreInst *StoreDest = CGF.Builder.CreateStore(Load, Dest);
    StoreDest->setAlignment(Align);
    return;
  }

  case AtomicExpr::AO__c11_atomic_store:
  case AtomicExpr::AO__atomic_store:
  case AtomicExpr::AO__atomic_store_n: {
    assert(!Dest && "Store does not return a value");
    llvm::LoadInst *LoadVal1 = CGF.Builder.CreateLoad(Val1);
    LoadVal1->setAlignment(Align);
    llvm::StoreInst *Store = CGF.Builder.CreateStore(LoadVal1, Ptr);
    Store->setAtomic(Order);
    Store->setAlignment(Size);
    Store->setVolatile(E->isVolatile());
    return;
  }

  case AtomicExpr::AO__c11_atomic_exchange:
  case AtomicExpr::AO__atomic_exchange_n:
  case AtomicExpr::AO__atomic_exchange:
    Op = llvm::AtomicRMWInst::Xchg;
    break;

  case AtomicExpr::AO__atomic_add_fetch:
    PostOp = llvm::Instruction::Add;
    // Fall through.
  case AtomicExpr::AO__c11_atomic_fetch_add:
  case AtomicExpr::AO__atomic_fetch_add:
    Op = llvm::AtomicRMWInst::Add;
    break;

  case AtomicExpr::AO__atomic_sub_fetch:
    PostOp = llvm::Instruction::Sub;
    // Fall through.
  case AtomicExpr::AO__c11_atomic_fetch_sub:
  case AtomicExpr::AO__atomic_fetch_sub:
    Op = llvm::AtomicRMWInst::Sub;
    break;

  case AtomicExpr::AO__atomic_and_fetch:
    PostOp = llvm::Instruction::And;
    // Fall through.
  case AtomicExpr::AO__c11_atomic_fetch_and:
  case AtomicExpr::AO__atomic_fetch_and:
    Op = llvm::AtomicRMWInst::And;
    break;

  case AtomicExpr::AO__atomic_or_fetch:
    PostOp = llvm::Instruction::Or;
    // Fall through.
  case AtomicExpr::AO__c11_atomic_fetch_or:
  case AtomicExpr::AO__atomic_fetch_or:
    Op = llvm::AtomicRMWInst::Or;
    break;

  case AtomicExpr::AO__atomic_xor_fetch:
    PostOp = llvm::Instruction::Xor;
    // Fall through.
  case AtomicExpr::AO__c11_atomic_fetch_xor:
  case AtomicExpr::AO__atomic_fetch_xor:
    Op = llvm::AtomicRMWInst::Xor;
    break;

  case AtomicExpr::AO__atomic_nand_fetch:
    PostOp = llvm::Instruction::And;
    // Fall through.
  case AtomicExpr::AO__atomic_fetch_nand:
    Op = llvm::AtomicRMWInst::Nand;
    break;
  }

  llvm::LoadInst *LoadVal1 = CGF.Builder.CreateLoad(Val1);
  LoadVal1->setAlignment(Align);
  llvm::AtomicRMWInst *RMWI =
      CGF.Builder.CreateAtomicRMW(Op, Ptr, LoadVal1, Order);
  RMWI->setVolatile(E->isVolatile());

  // For __atomic_*_fetch operations, perform the operation again to
  // determine the value which was written.
  llvm::Value *Result = RMWI;
  if (PostOp)
    Result = CGF.Builder.CreateBinOp(PostOp, RMWI, LoadVal1);
  if (E->getOp() == AtomicExpr::AO__atomic_nand_fetch)
    Result = CGF.Builder.CreateNot(Result);
  llvm::StoreInst *StoreDest = CGF.Builder.CreateStore(Result, Dest);
  StoreDest->setAlignment(Align);
}

// This function emits any expression (scalar, complex, or aggregate)
// into a temporary alloca.
static llvm::Value *
EmitValToTemp(CodeGenFunction &CGF, Expr *E) {
  llvm::Value *DeclPtr = CGF.CreateMemTemp(E->getType(), ".atomictmp");
  CGF.EmitAnyExprToMem(E, DeclPtr, E->getType().getQualifiers(),
                       /*Init*/ true);
  return DeclPtr;
}

static void
AddDirectArgument(CodeGenFunction &CGF, CallArgList &Args,
                  bool UseOptimizedLibcall, llvm::Value *Val, QualType ValTy,
                  SourceLocation Loc) {
  if (UseOptimizedLibcall) {
    // Load value and pass it to the function directly.
    unsigned Align = CGF.getContext().getTypeAlignInChars(ValTy).getQuantity();
    Val = CGF.EmitLoadOfScalar(Val, false, Align, ValTy, Loc);
    Args.add(RValue::get(Val), ValTy);
  } else {
    // Non-optimized functions always take a reference.
    Args.add(RValue::get(CGF.EmitCastToVoidPtr(Val)),
             CGF.getContext().VoidPtrTy);
  }
}

RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) {
  QualType AtomicTy = E->getPtr()->getType()->getPointeeType();
  QualType MemTy = AtomicTy;
  if (const AtomicType *AT = AtomicTy->getAs<AtomicType>())
    MemTy = AT->getValueType();
  CharUnits sizeChars = getContext().getTypeSizeInChars(AtomicTy);
  uint64_t Size = sizeChars.getQuantity();
  CharUnits alignChars = getContext().getTypeAlignInChars(AtomicTy);
  unsigned Align = alignChars.getQuantity();
  unsigned MaxInlineWidthInBits =
    getTarget().getMaxAtomicInlineWidth();
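  // Fall back to a runtime library call if the object's size and alignment
  // disagree, or if it is wider than the target can handle inline.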
  bool UseLibcall = (Size != Align ||
                     getContext().toBits(sizeChars) > MaxInlineWidthInBits);

  llvm::Value *OrderFail = nullptr, *Val1 = nullptr, *Val2 = nullptr;
  llvm::Value *Ptr = EmitScalarExpr(E->getPtr());

  if (E->getOp() == AtomicExpr::AO__c11_atomic_init) {
    assert(!Dest && "Init does not return a value");
    LValue lvalue = LValue::MakeAddr(Ptr, AtomicTy, alignChars, getContext());
    EmitAtomicInit(E->getVal1(), lvalue);
    return RValue::get(nullptr);
  }

  llvm::Value *Order = EmitScalarExpr(E->getOrder());

  switch (E->getOp()) {
  case AtomicExpr::AO__c11_atomic_init:
    llvm_unreachable("Already handled!");

  case AtomicExpr::AO__c11_atomic_load:
  case AtomicExpr::AO__atomic_load_n:
    break;

  case AtomicExpr::AO__atomic_load:
    Dest = EmitScalarExpr(E->getVal1());
    break;

  case AtomicExpr::AO__atomic_store:
    Val1 = EmitScalarExpr(E->getVal1());
    break;

  case AtomicExpr::AO__atomic_exchange:
    Val1 = EmitScalarExpr(E->getVal1());
    Dest = EmitScalarExpr(E->getVal2());
    break;

  case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
  case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
  case AtomicExpr::AO__atomic_compare_exchange_n:
  case AtomicExpr::AO__atomic_compare_exchange:
    Val1 = EmitScalarExpr(E->getVal1());
    if (E->getOp() == AtomicExpr::AO__atomic_compare_exchange)
      Val2 = EmitScalarExpr(E->getVal2());
    else
      Val2 = EmitValToTemp(*this, E->getVal2());
    OrderFail = EmitScalarExpr(E->getOrderFail());
    // Evaluate and discard the 'weak' argument.
    if (E->getNumSubExprs() == 6)
      EmitScalarExpr(E->getWeak());
    break;

  case AtomicExpr::AO__c11_atomic_fetch_add:
  case AtomicExpr::AO__c11_atomic_fetch_sub:
    if (MemTy->isPointerType()) {
      // For pointer arithmetic, we're required to do a bit of math:
      // adding 1 to an int* is not the same as adding 1 to a uintptr_t.
      // ... but only for the C11 builtins. The GNU builtins expect the
      // user to multiply by sizeof(T).
      QualType Val1Ty = E->getVal1()->getType();
      llvm::Value *Val1Scalar = EmitScalarExpr(E->getVal1());
      CharUnits PointeeIncAmt =
        getContext().getTypeSizeInChars(MemTy->getPointeeType());
      Val1Scalar = Builder.CreateMul(Val1Scalar, CGM.getSize(PointeeIncAmt));
      Val1 = CreateMemTemp(Val1Ty, ".atomictmp");
      EmitStoreOfScalar(Val1Scalar, MakeAddrLValue(Val1, Val1Ty));
      break;
    }
    // Fall through.
  case AtomicExpr::AO__atomic_fetch_add:
  case AtomicExpr::AO__atomic_fetch_sub:
  case AtomicExpr::AO__atomic_add_fetch:
  case AtomicExpr::AO__atomic_sub_fetch:
  case AtomicExpr::AO__c11_atomic_store:
  case AtomicExpr::AO__c11_atomic_exchange:
  case AtomicExpr::AO__atomic_store_n:
  case AtomicExpr::AO__atomic_exchange_n:
  case AtomicExpr::AO__c11_atomic_fetch_and:
  case AtomicExpr::AO__c11_atomic_fetch_or:
  case AtomicExpr::AO__c11_atomic_fetch_xor:
  case AtomicExpr::AO__atomic_fetch_and:
  case AtomicExpr::AO__atomic_fetch_or:
  case AtomicExpr::AO__atomic_fetch_xor:
  case AtomicExpr::AO__atomic_fetch_nand:
  case AtomicExpr::AO__atomic_and_fetch:
  case AtomicExpr::AO__atomic_or_fetch:
  case AtomicExpr::AO__atomic_xor_fetch:
  case AtomicExpr::AO__atomic_nand_fetch:
    Val1 = EmitValToTemp(*this, E->getVal1());
    break;
  }

  if (!E->getType()->isVoidType() && !Dest)
    Dest = CreateMemTemp(E->getType(), ".atomicdst");

  // Use a library call.  See: http://gcc.gnu.org/wiki/Atomic/GCCMM/LIbrary .
  if (UseLibcall) {
    bool UseOptimizedLibcall = false;
    switch (E->getOp()) {
    case AtomicExpr::AO__c11_atomic_fetch_add:
    case AtomicExpr::AO__atomic_fetch_add:
    case AtomicExpr::AO__c11_atomic_fetch_and:
    case AtomicExpr::AO__atomic_fetch_and:
    case AtomicExpr::AO__c11_atomic_fetch_or:
    case AtomicExpr::AO__atomic_fetch_or:
    case AtomicExpr::AO__c11_atomic_fetch_sub:
    case AtomicExpr::AO__atomic_fetch_sub:
    case AtomicExpr::AO__c11_atomic_fetch_xor:
    case AtomicExpr::AO__atomic_fetch_xor:
      // For these, only library calls for certain sizes exist.
      UseOptimizedLibcall = true;
      break;
    default:
      // Only use optimized library calls for sizes for which they exist.
      if (Size == 1 || Size == 2 || Size == 4 || Size == 8)
        UseOptimizedLibcall = true;
      break;
    }

    CallArgList Args;
    if (!UseOptimizedLibcall) {
      // For non-optimized library calls, the size is the first parameter.
      Args.add(RValue::get(llvm::ConstantInt::get(SizeTy, Size)),
               getContext().getSizeType());
    }
    // Atomic address is the first or second parameter.
    Args.add(RValue::get(EmitCastToVoidPtr(Ptr)), getContext().VoidPtrTy);

    std::string LibCallName;
    QualType LoweredMemTy =
      MemTy->isPointerType() ? getContext().getIntPtrType() : MemTy;
    QualType RetTy;
    bool HaveRetTy = false;
    switch (E->getOp()) {
    // There is only one libcall for compare and exchange, because there is no
    // optimisation benefit possible from a libcall version of a weak compare
    // and exchange.
    // bool __atomic_compare_exchange(size_t size, void *mem, void *expected,
    //                                void *desired, int success, int failure)
    // bool __atomic_compare_exchange_N(T *mem, T *expected, T desired,
    //                                  int success, int failure)
    case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
    case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
    case AtomicExpr::AO__atomic_compare_exchange:
    case AtomicExpr::AO__atomic_compare_exchange_n:
      LibCallName = "__atomic_compare_exchange";
      RetTy = getContext().BoolTy;
      HaveRetTy = true;
      Args.add(RValue::get(EmitCastToVoidPtr(Val1)), getContext().VoidPtrTy);
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val2, MemTy,
                        E->getExprLoc());
      Args.add(RValue::get(Order), getContext().IntTy);
      Order = OrderFail;
      break;
    // void __atomic_exchange(size_t size, void *mem, void *val, void *return,
    //                        int order)
    // T __atomic_exchange_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_exchange:
    case AtomicExpr::AO__atomic_exchange_n:
    case AtomicExpr::AO__atomic_exchange:
      LibCallName = "__atomic_exchange";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
                        E->getExprLoc());
      break;
    // void __atomic_store(size_t size, void *mem, void *val, int order)
    // void __atomic_store_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_store:
    case AtomicExpr::AO__atomic_store:
    case AtomicExpr::AO__atomic_store_n:
      LibCallName = "__atomic_store";
      RetTy = getContext().VoidTy;
      HaveRetTy = true;
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
                        E->getExprLoc());
      break;
    // void __atomic_load(size_t size, void *mem, void *return, int order)
    // T __atomic_load_N(T *mem, int order)
    case AtomicExpr::AO__c11_atomic_load:
    case AtomicExpr::AO__atomic_load:
    case AtomicExpr::AO__atomic_load_n:
      LibCallName = "__atomic_load";
      break;
    // T __atomic_fetch_add_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_fetch_add:
    case AtomicExpr::AO__atomic_fetch_add:
      LibCallName = "__atomic_fetch_add";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, LoweredMemTy,
                        E->getExprLoc());
      break;
    // T __atomic_fetch_and_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_fetch_and:
    case AtomicExpr::AO__atomic_fetch_and:
      LibCallName = "__atomic_fetch_and";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
                        E->getExprLoc());
      break;
    // T __atomic_fetch_or_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_fetch_or:
    case AtomicExpr::AO__atomic_fetch_or:
      LibCallName = "__atomic_fetch_or";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
                        E->getExprLoc());
      break;
    // T __atomic_fetch_sub_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_fetch_sub:
    case AtomicExpr::AO__atomic_fetch_sub:
      LibCallName = "__atomic_fetch_sub";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, LoweredMemTy,
                        E->getExprLoc());
      break;
    // T __atomic_fetch_xor_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_fetch_xor:
    case AtomicExpr::AO__atomic_fetch_xor:
      LibCallName = "__atomic_fetch_xor";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
                        E->getExprLoc());
      break;
    default: return EmitUnsupportedRValue(E, "atomic library call");
    }

    // Optimized functions have the size in their name.
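    // (For example, the four-byte variant of __atomic_fetch_add is named
    // "__atomic_fetch_add_4".)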
    if (UseOptimizedLibcall)
      LibCallName += "_" + llvm::utostr(Size);
    // By default, assume we return a value of the atomic type.
    if (!HaveRetTy) {
      if (UseOptimizedLibcall) {
        // Value is returned directly.
        RetTy = MemTy;
      } else {
        // Value is returned through parameter before the order.
        RetTy = getContext().VoidTy;
        Args.add(RValue::get(EmitCastToVoidPtr(Dest)),
                 getContext().VoidPtrTy);
      }
    }
    // The order is always the last parameter.
    Args.add(RValue::get(Order),
             getContext().IntTy);

    const CGFunctionInfo &FuncInfo =
        CGM.getTypes().arrangeFreeFunctionCall(RetTy, Args,
            FunctionType::ExtInfo(), RequiredArgs::All);
    llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FuncInfo);
    llvm::Constant *Func = CGM.CreateRuntimeFunction(FTy, LibCallName);
    RValue Res = EmitCall(FuncInfo, Func, ReturnValueSlot(), Args);
    if (!RetTy->isVoidType())
      return Res;
    if (E->getType()->isVoidType())
      return RValue::get(nullptr);
    return convertTempToRValue(Dest, E->getType(), E->getExprLoc());
  }

  bool IsStore = E->getOp() == AtomicExpr::AO__c11_atomic_store ||
                 E->getOp() == AtomicExpr::AO__atomic_store ||
                 E->getOp() == AtomicExpr::AO__atomic_store_n;
  bool IsLoad = E->getOp() == AtomicExpr::AO__c11_atomic_load ||
                E->getOp() == AtomicExpr::AO__atomic_load ||
                E->getOp() == AtomicExpr::AO__atomic_load_n;
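
  // Everything below operates on an integer of the atomic's width, so cast
  // the operand pointers to the matching integer pointer type.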
  llvm::Type *IPtrTy =
      llvm::IntegerType::get(getLLVMContext(), Size * 8)->getPointerTo();
  llvm::Value *OrigDest = Dest;
  Ptr = Builder.CreateBitCast(Ptr, IPtrTy);
  if (Val1) Val1 = Builder.CreateBitCast(Val1, IPtrTy);
  if (Val2) Val2 = Builder.CreateBitCast(Val2, IPtrTy);
  if (Dest && !E->isCmpXChg()) Dest = Builder.CreateBitCast(Dest, IPtrTy);

  if (isa<llvm::ConstantInt>(Order)) {
    int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
    switch (ord) {
    case AtomicExpr::AO_ABI_memory_order_relaxed:
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, OrderFail,
                   Size, Align, llvm::Monotonic);
      break;
    case AtomicExpr::AO_ABI_memory_order_consume:
    case AtomicExpr::AO_ABI_memory_order_acquire:
      if (IsStore)
        break; // Avoid crashing on code with undefined behavior
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, OrderFail,
                   Size, Align, llvm::Acquire);
      break;
    case AtomicExpr::AO_ABI_memory_order_release:
      if (IsLoad)
        break; // Avoid crashing on code with undefined behavior
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, OrderFail,
                   Size, Align, llvm::Release);
      break;
    case AtomicExpr::AO_ABI_memory_order_acq_rel:
      if (IsLoad || IsStore)
        break; // Avoid crashing on code with undefined behavior
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, OrderFail,
                   Size, Align, llvm::AcquireRelease);
      break;
    case AtomicExpr::AO_ABI_memory_order_seq_cst:
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, OrderFail,
                   Size, Align, llvm::SequentiallyConsistent);
      break;
    default: // invalid order
      // We should not ever get here normally, but it's hard to
      // enforce that in general.
      break;
    }
    if (E->getType()->isVoidType())
      return RValue::get(nullptr);
    return convertTempToRValue(OrigDest, E->getType(), E->getExprLoc());
  }

  // Long case, when Order isn't obviously constant.

  // Create all the relevant BB's
  llvm::BasicBlock *MonotonicBB = nullptr, *AcquireBB = nullptr,
                   *ReleaseBB = nullptr, *AcqRelBB = nullptr,
                   *SeqCstBB = nullptr;
  MonotonicBB = createBasicBlock("monotonic", CurFn);
  if (!IsStore)
    AcquireBB = createBasicBlock("acquire", CurFn);
  if (!IsLoad)
    ReleaseBB = createBasicBlock("release", CurFn);
  if (!IsLoad && !IsStore)
    AcqRelBB = createBasicBlock("acqrel", CurFn);
  SeqCstBB = createBasicBlock("seqcst", CurFn);
  llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);

  // Create the switch for the split
  // MonotonicBB is arbitrarily chosen as the default case; in practice, this
  // doesn't matter unless someone is crazy enough to use something that
  // doesn't fold to a constant for the ordering.
  Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
  llvm::SwitchInst *SI = Builder.CreateSwitch(Order, MonotonicBB);

  // Emit all the different atomics
  Builder.SetInsertPoint(MonotonicBB);
  EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, OrderFail,
               Size, Align, llvm::Monotonic);
  Builder.CreateBr(ContBB);
  if (!IsStore) {
    Builder.SetInsertPoint(AcquireBB);
    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, OrderFail,
                 Size, Align, llvm::Acquire);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32(AtomicExpr::AO_ABI_memory_order_consume),
                AcquireBB);
    SI->addCase(Builder.getInt32(AtomicExpr::AO_ABI_memory_order_acquire),
                AcquireBB);
  }
  if (!IsLoad) {
    Builder.SetInsertPoint(ReleaseBB);
    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, OrderFail,
                 Size, Align, llvm::Release);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32(AtomicExpr::AO_ABI_memory_order_release),
                ReleaseBB);
  }
  if (!IsLoad && !IsStore) {
    Builder.SetInsertPoint(AcqRelBB);
    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, OrderFail,
                 Size, Align, llvm::AcquireRelease);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32(AtomicExpr::AO_ABI_memory_order_acq_rel),
                AcqRelBB);
  }
  Builder.SetInsertPoint(SeqCstBB);
  EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, OrderFail,
               Size, Align, llvm::SequentiallyConsistent);
  Builder.CreateBr(ContBB);
  SI->addCase(Builder.getInt32(AtomicExpr::AO_ABI_memory_order_seq_cst),
              SeqCstBB);

  // Cleanup and return
  Builder.SetInsertPoint(ContBB);
  if (E->getType()->isVoidType())
    return RValue::get(nullptr);
  return convertTempToRValue(OrigDest, E->getType(), E->getExprLoc());
}

llvm::Value *AtomicInfo::emitCastToAtomicIntPointer(llvm::Value *addr) const {
  unsigned addrspace =
    cast<llvm::PointerType>(addr->getType())->getAddressSpace();
  llvm::IntegerType *ty =
    llvm::IntegerType::get(CGF.getLLVMContext(), AtomicSizeInBits);
  return CGF.Builder.CreateBitCast(addr, ty->getPointerTo(addrspace));
}

RValue AtomicInfo::convertTempToRValue(llvm::Value *addr,
                                       AggValueSlot resultSlot,
                                       SourceLocation loc) const {
  if (EvaluationKind == TEK_Aggregate)
    return resultSlot.asRValue();

  // Drill into the padding structure if we have one.
  if (hasPadding())
    addr = CGF.Builder.CreateStructGEP(addr, 0);

  // Otherwise, just convert the temporary to an r-value using the
  // normal conversion routine.
  return CGF.convertTempToRValue(addr, getValueType(), loc);
}

/// Emit a load from an l-value of atomic type.  Note that the r-value
/// we produce is an r-value of the atomic *value* type.
RValue CodeGenFunction::EmitAtomicLoad(LValue src, SourceLocation loc,
                                       AggValueSlot resultSlot) {
  AtomicInfo atomics(*this, src);

  // Check whether we should use a library call.
  if (atomics.shouldUseLibcall()) {
    llvm::Value *tempAddr;
    if (!resultSlot.isIgnored()) {
      assert(atomics.getEvaluationKind() == TEK_Aggregate);
      tempAddr = resultSlot.getAddr();
    } else {
      tempAddr = CreateMemTemp(atomics.getAtomicType(), "atomic-load-temp");
    }

    // void __atomic_load(size_t size, void *mem, void *return, int order);
    CallArgList args;
    args.add(RValue::get(atomics.getAtomicSizeValue()),
             getContext().getSizeType());
    args.add(RValue::get(EmitCastToVoidPtr(src.getAddress())),
             getContext().VoidPtrTy);
    args.add(RValue::get(EmitCastToVoidPtr(tempAddr)),
             getContext().VoidPtrTy);
    args.add(RValue::get(llvm::ConstantInt::get(
                 IntTy, AtomicExpr::AO_ABI_memory_order_seq_cst)),
             getContext().IntTy);
    emitAtomicLibcall(*this, "__atomic_load", getContext().VoidTy, args);

    // Produce the r-value.
    return atomics.convertTempToRValue(tempAddr, resultSlot, loc);
  }

  // Okay, we're doing this natively.
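  // The load is performed on an integer of the full atomic width; any
  // padding bits it picks up are dealt with below.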
  llvm::Value *addr = atomics.emitCastToAtomicIntPointer(src.getAddress());
  llvm::LoadInst *load = Builder.CreateLoad(addr, "atomic-load");
  load->setAtomic(llvm::SequentiallyConsistent);

  // Other decoration.
  load->setAlignment(src.getAlignment().getQuantity());
  if (src.isVolatileQualified())
    load->setVolatile(true);
  if (src.getTBAAInfo())
    CGM.DecorateInstruction(load, src.getTBAAInfo());

  // Okay, turn that back into the original value type.
  QualType valueType = atomics.getValueType();
  llvm::Value *result = load;

  // If we're ignoring an aggregate return, don't do anything.
  if (atomics.getEvaluationKind() == TEK_Aggregate && resultSlot.isIgnored())
    return RValue::getAggregate(nullptr, false);
John McCalla8ec7eb2013-03-07 21:37:17 +0000902
903 // The easiest way to do this this is to go through memory, but we
  // try not to in some easy cases.
  if (atomics.getEvaluationKind() == TEK_Scalar && !atomics.hasPadding()) {
    llvm::Type *resultTy = CGM.getTypes().ConvertTypeForMem(valueType);
    if (isa<llvm::IntegerType>(resultTy)) {
      assert(result->getType() == resultTy);
      result = EmitFromMemory(result, valueType);
    } else if (isa<llvm::PointerType>(resultTy)) {
      result = Builder.CreateIntToPtr(result, resultTy);
    } else {
      result = Builder.CreateBitCast(result, resultTy);
    }
    return RValue::get(result);
  }

  // Create a temporary.  This needs to be big enough to hold the
  // atomic integer.
  llvm::Value *temp;
  bool tempIsVolatile = false;
  CharUnits tempAlignment;
  if (atomics.getEvaluationKind() == TEK_Aggregate) {
    assert(!resultSlot.isIgnored());
    temp = resultSlot.getAddr();
    tempAlignment = atomics.getValueAlignment();
    tempIsVolatile = resultSlot.isVolatile();
  } else {
    temp = CreateMemTemp(atomics.getAtomicType(), "atomic-load-temp");
    tempAlignment = atomics.getAtomicAlignment();
  }

  // Slam the integer into the temporary.
  llvm::Value *castTemp = atomics.emitCastToAtomicIntPointer(temp);
  Builder.CreateAlignedStore(result, castTemp, tempAlignment.getQuantity())
    ->setVolatile(tempIsVolatile);

  return atomics.convertTempToRValue(temp, resultSlot, loc);
}


/// Copy an r-value into memory as part of storing to an atomic type.
/// This needs to create a bit-pattern suitable for atomic operations.
void AtomicInfo::emitCopyIntoMemory(RValue rvalue, LValue dest) const {
  // If we have an r-value, the rvalue should be of the atomic type,
  // which means that the caller is responsible for having zeroed
  // any padding.  Just do an aggregate copy of that type.
  if (rvalue.isAggregate()) {
    CGF.EmitAggregateCopy(dest.getAddress(),
                          rvalue.getAggregateAddr(),
                          getAtomicType(),
                          (rvalue.isVolatileQualified()
                           || dest.isVolatileQualified()),
                          dest.getAlignment());
    return;
  }

  // Okay, otherwise we're copying stuff.

  // Zero out the buffer if necessary.
  emitMemSetZeroIfNecessary(dest);

  // Drill past the padding if present.
  dest = projectValue(dest);

  // Okay, store the rvalue in.
  if (rvalue.isScalar()) {
    CGF.EmitStoreOfScalar(rvalue.getScalarVal(), dest, /*init*/ true);
  } else {
    CGF.EmitStoreOfComplex(rvalue.getComplexVal(), dest, /*init*/ true);
  }
}


/// Materialize an r-value into memory for the purposes of storing it
/// to an atomic type.
llvm::Value *AtomicInfo::materializeRValue(RValue rvalue) const {
  // Aggregate r-values are already in memory, and EmitAtomicStore
  // requires them to be values of the atomic type.
  if (rvalue.isAggregate())
    return rvalue.getAggregateAddr();

  // Otherwise, make a temporary and materialize into it.
  llvm::Value *temp = CGF.CreateMemTemp(getAtomicType(), "atomic-store-temp");
  LValue tempLV =
    CGF.MakeAddrLValue(temp, getAtomicType(), getAtomicAlignment());
  emitCopyIntoMemory(rvalue, tempLV);
  return temp;
}

/// Emit a store to an l-value of atomic type.
///
/// Note that the r-value is expected to be an r-value *of the atomic
/// type*; this means that for aggregate r-values, it should include
/// storage for any padding that was necessary.
void CodeGenFunction::EmitAtomicStore(RValue rvalue, LValue dest, bool isInit) {
  // If this is an aggregate r-value, it should agree in type except
  // maybe for address-space qualification.
  assert(!rvalue.isAggregate() ||
         rvalue.getAggregateAddr()->getType()->getPointerElementType()
           == dest.getAddress()->getType()->getPointerElementType());

  AtomicInfo atomics(*this, dest);

  // If this is an initialization, just put the value there normally.
  if (isInit) {
    atomics.emitCopyIntoMemory(rvalue, dest);
    return;
  }

  // Check whether we should use a library call.
  if (atomics.shouldUseLibcall()) {
    // Produce a source address.
    llvm::Value *srcAddr = atomics.materializeRValue(rvalue);

    // void __atomic_store(size_t size, void *mem, void *val, int order)
    CallArgList args;
    args.add(RValue::get(atomics.getAtomicSizeValue()),
             getContext().getSizeType());
    args.add(RValue::get(EmitCastToVoidPtr(dest.getAddress())),
             getContext().VoidPtrTy);
    args.add(RValue::get(EmitCastToVoidPtr(srcAddr)),
             getContext().VoidPtrTy);
    args.add(RValue::get(llvm::ConstantInt::get(
                 IntTy, AtomicExpr::AO_ABI_memory_order_seq_cst)),
             getContext().IntTy);
    emitAtomicLibcall(*this, "__atomic_store", getContext().VoidTy, args);
    return;
  }

  // Okay, we're doing this natively.
  llvm::Value *intValue;

  // If we've got a scalar value of the right size, try to avoid going
  // through memory.
  if (rvalue.isScalar() && !atomics.hasPadding()) {
    llvm::Value *value = rvalue.getScalarVal();
    if (isa<llvm::IntegerType>(value->getType())) {
      intValue = value;
    } else {
      llvm::IntegerType *inputIntTy =
        llvm::IntegerType::get(getLLVMContext(), atomics.getValueSizeInBits());
      if (isa<llvm::PointerType>(value->getType())) {
        intValue = Builder.CreatePtrToInt(value, inputIntTy);
      } else {
        intValue = Builder.CreateBitCast(value, inputIntTy);
      }
    }

  // Otherwise, we need to go through memory.
  } else {
    // Put the r-value in memory.
    llvm::Value *addr = atomics.materializeRValue(rvalue);

    // Cast the temporary to the atomic int type and pull a value out.
    addr = atomics.emitCastToAtomicIntPointer(addr);
    intValue = Builder.CreateAlignedLoad(addr,
                                 atomics.getAtomicAlignment().getQuantity());
  }

  // Do the atomic store.
  llvm::Value *addr = atomics.emitCastToAtomicIntPointer(dest.getAddress());
  llvm::StoreInst *store = Builder.CreateStore(intValue, addr);

  // Initializations don't need to be atomic.
  if (!isInit) store->setAtomic(llvm::SequentiallyConsistent);

  // Other decoration.
  store->setAlignment(dest.getAlignment().getQuantity());
  if (dest.isVolatileQualified())
    store->setVolatile(true);
  if (dest.getTBAAInfo())
    CGM.DecorateInstruction(store, dest.getTBAAInfo());
}

void CodeGenFunction::EmitAtomicInit(Expr *init, LValue dest) {
  AtomicInfo atomics(*this, dest);

  switch (atomics.getEvaluationKind()) {
  case TEK_Scalar: {
    llvm::Value *value = EmitScalarExpr(init);
    atomics.emitCopyIntoMemory(RValue::get(value), dest);
    return;
  }

  case TEK_Complex: {
    ComplexPairTy value = EmitComplexExpr(init);
    atomics.emitCopyIntoMemory(RValue::getComplex(value), dest);
    return;
  }

  case TEK_Aggregate: {
    // Fix up the destination if the initializer isn't an expression
    // of atomic type.
    bool Zeroed = false;
    if (!init->getType()->isAtomicType()) {
      Zeroed = atomics.emitMemSetZeroIfNecessary(dest);
      dest = atomics.projectValue(dest);
    }

    // Evaluate the expression directly into the destination.
    AggValueSlot slot = AggValueSlot::forLValue(dest,
                                        AggValueSlot::IsNotDestructed,
                                        AggValueSlot::DoesNotNeedGCBarriers,
                                        AggValueSlot::IsNotAliased,
                                        Zeroed ? AggValueSlot::IsZeroed :
                                                 AggValueSlot::IsNotZeroed);

    EmitAggExpr(init, slot);
    return;
  }
  }
  llvm_unreachable("bad evaluation kind");
}