//===--- CGAtomic.cpp - Emit LLVM IR for atomic operations ----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the code for emitting atomic operations.
//
//===----------------------------------------------------------------------===//

#include "CodeGenFunction.h"
#include "CGCall.h"
#include "CGRecordLayout.h"
#include "CodeGenModule.h"
#include "clang/AST/ASTContext.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Operator.h"

using namespace clang;
using namespace CodeGen;

namespace {
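  /// Helper that gathers the layout facts needed to lower operations on an
  /// atomic l-value: the value and atomic sizes and alignments, the padding
  /// between them, the evaluation kind, and whether the operation has to go
  /// through an __atomic_* library call instead of native instructions.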
  class AtomicInfo {
    CodeGenFunction &CGF;
    QualType AtomicTy;
    QualType ValueTy;
    uint64_t AtomicSizeInBits;
    uint64_t ValueSizeInBits;
    CharUnits AtomicAlign;
    CharUnits ValueAlign;
    CharUnits LValueAlign;
    TypeEvaluationKind EvaluationKind;
    bool UseLibcall;
    LValue LVal;
    CGBitFieldInfo BFI;
  public:
    AtomicInfo(CodeGenFunction &CGF, LValue &lvalue)
        : CGF(CGF), AtomicSizeInBits(0), ValueSizeInBits(0),
          EvaluationKind(TEK_Scalar), UseLibcall(true) {
      assert(!lvalue.isGlobalReg());
      ASTContext &C = CGF.getContext();
      if (lvalue.isSimple()) {
        AtomicTy = lvalue.getType();
        if (auto *ATy = AtomicTy->getAs<AtomicType>())
          ValueTy = ATy->getValueType();
        else
          ValueTy = AtomicTy;
        EvaluationKind = CGF.getEvaluationKind(ValueTy);

        uint64_t ValueAlignInBits;
        uint64_t AtomicAlignInBits;
        TypeInfo ValueTI = C.getTypeInfo(ValueTy);
        ValueSizeInBits = ValueTI.Width;
        ValueAlignInBits = ValueTI.Align;

        TypeInfo AtomicTI = C.getTypeInfo(AtomicTy);
        AtomicSizeInBits = AtomicTI.Width;
        AtomicAlignInBits = AtomicTI.Align;

        assert(ValueSizeInBits <= AtomicSizeInBits);
        assert(ValueAlignInBits <= AtomicAlignInBits);

        AtomicAlign = C.toCharUnitsFromBits(AtomicAlignInBits);
        ValueAlign = C.toCharUnitsFromBits(ValueAlignInBits);
        if (lvalue.getAlignment().isZero())
          lvalue.setAlignment(AtomicAlign);

        LVal = lvalue;
      } else if (lvalue.isBitField()) {
        auto &OrigBFI = lvalue.getBitFieldInfo();
        auto Offset = OrigBFI.Offset % C.toBits(lvalue.getAlignment());
        AtomicSizeInBits = C.toBits(
            C.toCharUnitsFromBits(Offset + OrigBFI.Size + C.getCharWidth() - 1)
                .RoundUpToAlignment(lvalue.getAlignment()));
        auto VoidPtrAddr = CGF.EmitCastToVoidPtr(lvalue.getBitFieldAddr());
        auto OffsetInChars =
            (C.toCharUnitsFromBits(OrigBFI.Offset) / lvalue.getAlignment()) *
            lvalue.getAlignment();
        VoidPtrAddr = CGF.Builder.CreateConstGEP1_64(
            VoidPtrAddr, OffsetInChars.getQuantity());
        auto Addr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
            VoidPtrAddr,
            CGF.Builder.getIntNTy(AtomicSizeInBits)->getPointerTo(),
            "atomic_bitfield_base");
        BFI = OrigBFI;
        BFI.Offset = Offset;
        BFI.StorageSize = AtomicSizeInBits;
        LVal = LValue::MakeBitfield(Addr, BFI, lvalue.getType(),
                                    lvalue.getAlignment());
      } else if (lvalue.isVectorElt()) {
        AtomicSizeInBits = C.getTypeSize(lvalue.getType());
        LVal = lvalue;
      } else {
        assert(lvalue.isExtVectorElt());
        AtomicSizeInBits = C.getTypeSize(lvalue.getType());
        LVal = lvalue;
      }
      UseLibcall = !C.getTargetInfo().hasBuiltinAtomic(
          AtomicSizeInBits, C.toBits(lvalue.getAlignment()));
    }

    QualType getAtomicType() const { return AtomicTy; }
    QualType getValueType() const { return ValueTy; }
    CharUnits getAtomicAlignment() const { return AtomicAlign; }
    CharUnits getValueAlignment() const { return ValueAlign; }
    uint64_t getAtomicSizeInBits() const { return AtomicSizeInBits; }
    uint64_t getValueSizeInBits() const { return ValueSizeInBits; }
    TypeEvaluationKind getEvaluationKind() const { return EvaluationKind; }
    bool shouldUseLibcall() const { return UseLibcall; }
    const LValue &getAtomicLValue() const { return LVal; }

    /// Is the atomic size larger than the underlying value type?
    ///
    /// Note that the absence of padding does not mean that atomic
    /// objects are completely interchangeable with non-atomic
    /// objects: we might have promoted the alignment of a type
    /// without making it bigger.
    bool hasPadding() const {
      return (ValueSizeInBits != AtomicSizeInBits);
    }

    bool emitMemSetZeroIfNecessary() const;

    llvm::Value *getAtomicSizeValue() const {
      CharUnits size = CGF.getContext().toCharUnitsFromBits(AtomicSizeInBits);
      return CGF.CGM.getSize(size);
    }

    /// Cast the given pointer to an integer pointer suitable for
    /// atomic operations.
    llvm::Value *emitCastToAtomicIntPointer(llvm::Value *addr) const;

    /// Turn an atomic-layout object into an r-value.
    RValue convertTempToRValue(llvm::Value *addr,
                               AggValueSlot resultSlot,
                               SourceLocation loc) const;

    /// \brief Converts an r-value to an integer value.
    llvm::Value *convertRValueToInt(RValue RVal) const;

    RValue convertIntToValue(llvm::Value *IntVal, AggValueSlot ResultSlot,
                             SourceLocation Loc) const;

    /// Copy an atomic r-value into atomic-layout memory.
    void emitCopyIntoMemory(RValue rvalue) const;

    /// Project an l-value down to the value field.
    LValue projectValue() const {
      assert(LVal.isSimple());
      llvm::Value *addr = LVal.getAddress();
      if (hasPadding())
        addr = CGF.Builder.CreateStructGEP(addr, 0);

      return LValue::MakeAddr(addr, getValueType(), LVal.getAlignment(),
                              CGF.getContext(), LVal.getTBAAInfo());
    }

    /// Materialize an atomic r-value in atomic-layout memory.
    llvm::Value *materializeRValue(RValue rvalue) const;

  private:
    bool requiresMemSetZero(llvm::Type *type) const;
  };
}

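/// Emit a call to an __atomic_* runtime library function with the given
/// name, result type, and argument list.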
static RValue emitAtomicLibcall(CodeGenFunction &CGF,
                                StringRef fnName,
                                QualType resultType,
                                CallArgList &args) {
  const CGFunctionInfo &fnInfo =
    CGF.CGM.getTypes().arrangeFreeFunctionCall(resultType, args,
            FunctionType::ExtInfo(), RequiredArgs::All);
  llvm::FunctionType *fnTy = CGF.CGM.getTypes().GetFunctionType(fnInfo);
  llvm::Constant *fn = CGF.CGM.CreateRuntimeFunction(fnTy, fnName);
  return CGF.EmitCall(fnInfo, fn, ReturnValueSlot(), args);
}

/// Does a store of the given IR type modify the full expected width?
static bool isFullSizeType(CodeGenModule &CGM, llvm::Type *type,
                           uint64_t expectedSize) {
  return (CGM.getDataLayout().getTypeStoreSize(type) * 8 == expectedSize);
}

/// Does the atomic type require memsetting to zero before initialization?
///
/// The IR type is provided as a way of making certain queries faster.
bool AtomicInfo::requiresMemSetZero(llvm::Type *type) const {
  // If the atomic type has size padding, we definitely need a memset.
  if (hasPadding()) return true;

  // Otherwise, do some simple heuristics to try to avoid it:
  switch (getEvaluationKind()) {
  // For scalars and complexes, check whether the store size of the
  // type uses the full size.
  case TEK_Scalar:
    return !isFullSizeType(CGF.CGM, type, AtomicSizeInBits);
  case TEK_Complex:
    return !isFullSizeType(CGF.CGM, type->getStructElementType(0),
                           AtomicSizeInBits / 2);

  // Padding in structs has an undefined bit pattern.  User beware.
  case TEK_Aggregate:
    return false;
  }
  llvm_unreachable("bad evaluation kind");
}

bool AtomicInfo::emitMemSetZeroIfNecessary() const {
  assert(LVal.isSimple());
  llvm::Value *addr = LVal.getAddress();
  if (!requiresMemSetZero(addr->getType()->getPointerElementType()))
    return false;

  CGF.Builder.CreateMemSet(addr, llvm::ConstantInt::get(CGF.Int8Ty, 0),
                           AtomicSizeInBits / 8,
                           LVal.getAlignment().getQuantity());
  return true;
}

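/// Emit a single compare-and-exchange at fixed success/failure orderings:
/// load the expected and desired values, issue the cmpxchg, write the old
/// value back to Val1 on failure, and store the success flag to Dest.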
static void emitAtomicCmpXchg(CodeGenFunction &CGF, AtomicExpr *E, bool IsWeak,
                              llvm::Value *Dest, llvm::Value *Ptr,
                              llvm::Value *Val1, llvm::Value *Val2,
                              uint64_t Size, unsigned Align,
                              llvm::AtomicOrdering SuccessOrder,
                              llvm::AtomicOrdering FailureOrder) {
  llvm::LoadInst *Expected = CGF.Builder.CreateLoad(Val1);
  Expected->setAlignment(Align);
  llvm::LoadInst *Desired = CGF.Builder.CreateLoad(Val2);
  Desired->setAlignment(Align);

  llvm::AtomicCmpXchgInst *Pair = CGF.Builder.CreateAtomicCmpXchg(
      Ptr, Expected, Desired, SuccessOrder, FailureOrder);
  Pair->setVolatile(E->isVolatile());
  Pair->setWeak(IsWeak);

  // Cmp holds the result of the compare-exchange operation: true on success,
  // false on failure.
  llvm::Value *Old = CGF.Builder.CreateExtractValue(Pair, 0);
  llvm::Value *Cmp = CGF.Builder.CreateExtractValue(Pair, 1);

  // This basic block is used to hold the store instruction if the operation
  // failed.
  llvm::BasicBlock *StoreExpectedBB =
      CGF.createBasicBlock("cmpxchg.store_expected", CGF.CurFn);

  // This basic block is the exit point of the operation, we should end up
  // here regardless of whether or not the operation succeeded.
  llvm::BasicBlock *ContinueBB =
      CGF.createBasicBlock("cmpxchg.continue", CGF.CurFn);

  // Update Expected if Expected isn't equal to Old, otherwise branch to the
  // exit point.
  CGF.Builder.CreateCondBr(Cmp, ContinueBB, StoreExpectedBB);

  CGF.Builder.SetInsertPoint(StoreExpectedBB);
  // Update the memory at Expected with Old's value.
  llvm::StoreInst *StoreExpected = CGF.Builder.CreateStore(Old, Val1);
  StoreExpected->setAlignment(Align);
  // Finally, branch to the exit point.
  CGF.Builder.CreateBr(ContinueBB);

  CGF.Builder.SetInsertPoint(ContinueBB);
  // Update the memory at Dest with Cmp's value.
  CGF.EmitStoreOfScalar(Cmp, CGF.MakeAddrLValue(Dest, E->getType()));
  return;
}

/// Given an ordering required on success, emit all possible cmpxchg
/// instructions to cope with the provided (but possibly only dynamically known)
/// FailureOrder.
static void emitAtomicCmpXchgFailureSet(CodeGenFunction &CGF, AtomicExpr *E,
                                        bool IsWeak, llvm::Value *Dest,
                                        llvm::Value *Ptr, llvm::Value *Val1,
                                        llvm::Value *Val2,
                                        llvm::Value *FailureOrderVal,
                                        uint64_t Size, unsigned Align,
                                        llvm::AtomicOrdering SuccessOrder) {
  llvm::AtomicOrdering FailureOrder;
  if (llvm::ConstantInt *FO = dyn_cast<llvm::ConstantInt>(FailureOrderVal)) {
    switch (FO->getSExtValue()) {
    default:
      FailureOrder = llvm::Monotonic;
      break;
    case AtomicExpr::AO_ABI_memory_order_consume:
    case AtomicExpr::AO_ABI_memory_order_acquire:
      FailureOrder = llvm::Acquire;
      break;
    case AtomicExpr::AO_ABI_memory_order_seq_cst:
      FailureOrder = llvm::SequentiallyConsistent;
      break;
    }
    if (FailureOrder >= SuccessOrder) {
      // Don't assert on undefined behaviour.
      FailureOrder =
          llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(SuccessOrder);
    }
    emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, Size, Align,
                      SuccessOrder, FailureOrder);
    return;
  }

  // Create all the relevant BB's
  llvm::BasicBlock *MonotonicBB = nullptr, *AcquireBB = nullptr,
                   *SeqCstBB = nullptr;
  MonotonicBB = CGF.createBasicBlock("monotonic_fail", CGF.CurFn);
  if (SuccessOrder != llvm::Monotonic && SuccessOrder != llvm::Release)
    AcquireBB = CGF.createBasicBlock("acquire_fail", CGF.CurFn);
  if (SuccessOrder == llvm::SequentiallyConsistent)
    SeqCstBB = CGF.createBasicBlock("seqcst_fail", CGF.CurFn);

  llvm::BasicBlock *ContBB = CGF.createBasicBlock("atomic.continue", CGF.CurFn);

  llvm::SwitchInst *SI = CGF.Builder.CreateSwitch(FailureOrderVal, MonotonicBB);

  // Emit all the different atomics

  // MonotonicBB is arbitrarily chosen as the default case; in practice, this
  // doesn't matter unless someone is crazy enough to use something that
  // doesn't fold to a constant for the ordering.
  CGF.Builder.SetInsertPoint(MonotonicBB);
  emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2,
                    Size, Align, SuccessOrder, llvm::Monotonic);
  CGF.Builder.CreateBr(ContBB);

  if (AcquireBB) {
    CGF.Builder.SetInsertPoint(AcquireBB);
    emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2,
                      Size, Align, SuccessOrder, llvm::Acquire);
    CGF.Builder.CreateBr(ContBB);
    SI->addCase(CGF.Builder.getInt32(AtomicExpr::AO_ABI_memory_order_consume),
                AcquireBB);
    SI->addCase(CGF.Builder.getInt32(AtomicExpr::AO_ABI_memory_order_acquire),
                AcquireBB);
  }
  if (SeqCstBB) {
    CGF.Builder.SetInsertPoint(SeqCstBB);
    emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2,
                      Size, Align, SuccessOrder, llvm::SequentiallyConsistent);
    CGF.Builder.CreateBr(ContBB);
    SI->addCase(CGF.Builder.getInt32(AtomicExpr::AO_ABI_memory_order_seq_cst),
                SeqCstBB);
  }

  CGF.Builder.SetInsertPoint(ContBB);
}

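/// Emit the instruction sequence for one atomic operation at a known
/// ordering: a cmpxchg for the compare-exchange forms, an atomic load or
/// store, or an atomicrmw, re-applying the operation afterwards for the
/// __atomic_*_fetch variants so the new value can be stored to Dest.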
static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, llvm::Value *Dest,
                         llvm::Value *Ptr, llvm::Value *Val1, llvm::Value *Val2,
                         llvm::Value *IsWeak, llvm::Value *FailureOrder,
                         uint64_t Size, unsigned Align,
                         llvm::AtomicOrdering Order) {
  llvm::AtomicRMWInst::BinOp Op = llvm::AtomicRMWInst::Add;
  llvm::Instruction::BinaryOps PostOp = (llvm::Instruction::BinaryOps)0;

  switch (E->getOp()) {
  case AtomicExpr::AO__c11_atomic_init:
    llvm_unreachable("Already handled!");

  case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
    emitAtomicCmpXchgFailureSet(CGF, E, false, Dest, Ptr, Val1, Val2,
                                FailureOrder, Size, Align, Order);
    return;
  case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
    emitAtomicCmpXchgFailureSet(CGF, E, true, Dest, Ptr, Val1, Val2,
                                FailureOrder, Size, Align, Order);
    return;
  case AtomicExpr::AO__atomic_compare_exchange:
  case AtomicExpr::AO__atomic_compare_exchange_n: {
    if (llvm::ConstantInt *IsWeakC = dyn_cast<llvm::ConstantInt>(IsWeak)) {
      emitAtomicCmpXchgFailureSet(CGF, E, IsWeakC->getZExtValue(), Dest, Ptr,
                                  Val1, Val2, FailureOrder, Size, Align, Order);
    } else {
      // Create all the relevant BB's
      llvm::BasicBlock *StrongBB =
          CGF.createBasicBlock("cmpxchg.strong", CGF.CurFn);
      llvm::BasicBlock *WeakBB = CGF.createBasicBlock("cmxchg.weak", CGF.CurFn);
      llvm::BasicBlock *ContBB =
          CGF.createBasicBlock("cmpxchg.continue", CGF.CurFn);

      llvm::SwitchInst *SI = CGF.Builder.CreateSwitch(IsWeak, WeakBB);
      SI->addCase(CGF.Builder.getInt1(false), StrongBB);

      CGF.Builder.SetInsertPoint(StrongBB);
      emitAtomicCmpXchgFailureSet(CGF, E, false, Dest, Ptr, Val1, Val2,
                                  FailureOrder, Size, Align, Order);
      CGF.Builder.CreateBr(ContBB);

      CGF.Builder.SetInsertPoint(WeakBB);
      emitAtomicCmpXchgFailureSet(CGF, E, true, Dest, Ptr, Val1, Val2,
                                  FailureOrder, Size, Align, Order);
      CGF.Builder.CreateBr(ContBB);

      CGF.Builder.SetInsertPoint(ContBB);
    }
    return;
  }
  case AtomicExpr::AO__c11_atomic_load:
  case AtomicExpr::AO__atomic_load_n:
  case AtomicExpr::AO__atomic_load: {
    llvm::LoadInst *Load = CGF.Builder.CreateLoad(Ptr);
    Load->setAtomic(Order);
    Load->setAlignment(Size);
    Load->setVolatile(E->isVolatile());
    llvm::StoreInst *StoreDest = CGF.Builder.CreateStore(Load, Dest);
    StoreDest->setAlignment(Align);
    return;
  }

  case AtomicExpr::AO__c11_atomic_store:
  case AtomicExpr::AO__atomic_store:
  case AtomicExpr::AO__atomic_store_n: {
    assert(!Dest && "Store does not return a value");
    llvm::LoadInst *LoadVal1 = CGF.Builder.CreateLoad(Val1);
    LoadVal1->setAlignment(Align);
    llvm::StoreInst *Store = CGF.Builder.CreateStore(LoadVal1, Ptr);
    Store->setAtomic(Order);
    Store->setAlignment(Size);
    Store->setVolatile(E->isVolatile());
    return;
  }

  case AtomicExpr::AO__c11_atomic_exchange:
  case AtomicExpr::AO__atomic_exchange_n:
  case AtomicExpr::AO__atomic_exchange:
    Op = llvm::AtomicRMWInst::Xchg;
    break;

  case AtomicExpr::AO__atomic_add_fetch:
    PostOp = llvm::Instruction::Add;
    // Fall through.
  case AtomicExpr::AO__c11_atomic_fetch_add:
  case AtomicExpr::AO__atomic_fetch_add:
    Op = llvm::AtomicRMWInst::Add;
    break;

  case AtomicExpr::AO__atomic_sub_fetch:
    PostOp = llvm::Instruction::Sub;
    // Fall through.
  case AtomicExpr::AO__c11_atomic_fetch_sub:
  case AtomicExpr::AO__atomic_fetch_sub:
    Op = llvm::AtomicRMWInst::Sub;
    break;

  case AtomicExpr::AO__atomic_and_fetch:
    PostOp = llvm::Instruction::And;
    // Fall through.
  case AtomicExpr::AO__c11_atomic_fetch_and:
  case AtomicExpr::AO__atomic_fetch_and:
    Op = llvm::AtomicRMWInst::And;
    break;

  case AtomicExpr::AO__atomic_or_fetch:
    PostOp = llvm::Instruction::Or;
    // Fall through.
  case AtomicExpr::AO__c11_atomic_fetch_or:
  case AtomicExpr::AO__atomic_fetch_or:
    Op = llvm::AtomicRMWInst::Or;
    break;

  case AtomicExpr::AO__atomic_xor_fetch:
    PostOp = llvm::Instruction::Xor;
    // Fall through.
  case AtomicExpr::AO__c11_atomic_fetch_xor:
  case AtomicExpr::AO__atomic_fetch_xor:
    Op = llvm::AtomicRMWInst::Xor;
    break;

  case AtomicExpr::AO__atomic_nand_fetch:
    PostOp = llvm::Instruction::And;
    // Fall through.
  case AtomicExpr::AO__atomic_fetch_nand:
    Op = llvm::AtomicRMWInst::Nand;
    break;
  }

  llvm::LoadInst *LoadVal1 = CGF.Builder.CreateLoad(Val1);
  LoadVal1->setAlignment(Align);
  llvm::AtomicRMWInst *RMWI =
      CGF.Builder.CreateAtomicRMW(Op, Ptr, LoadVal1, Order);
  RMWI->setVolatile(E->isVolatile());

  // For __atomic_*_fetch operations, perform the operation again to
  // determine the value which was written.
  llvm::Value *Result = RMWI;
  if (PostOp)
    Result = CGF.Builder.CreateBinOp(PostOp, RMWI, LoadVal1);
  if (E->getOp() == AtomicExpr::AO__atomic_nand_fetch)
    Result = CGF.Builder.CreateNot(Result);
  llvm::StoreInst *StoreDest = CGF.Builder.CreateStore(Result, Dest);
  StoreDest->setAlignment(Align);
}

// This function emits any expression (scalar, complex, or aggregate)
// into a temporary alloca.
static llvm::Value *
EmitValToTemp(CodeGenFunction &CGF, Expr *E) {
  llvm::Value *DeclPtr = CGF.CreateMemTemp(E->getType(), ".atomictmp");
  CGF.EmitAnyExprToMem(E, DeclPtr, E->getType().getQualifiers(),
                       /*Init*/ true);
  return DeclPtr;
}

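/// Add a value argument for an atomic libcall: for the optimized,
/// size-suffixed variants the value is loaded and passed directly as a
/// sized integer, otherwise it is passed by address as a void pointer.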
static void
AddDirectArgument(CodeGenFunction &CGF, CallArgList &Args,
                  bool UseOptimizedLibcall, llvm::Value *Val, QualType ValTy,
                  SourceLocation Loc, CharUnits SizeInChars) {
  if (UseOptimizedLibcall) {
    // Load value and pass it to the function directly.
    unsigned Align = CGF.getContext().getTypeAlignInChars(ValTy).getQuantity();
    int64_t SizeInBits = CGF.getContext().toBits(SizeInChars);
    ValTy =
        CGF.getContext().getIntTypeForBitwidth(SizeInBits, /*Signed=*/false);
    llvm::Type *IPtrTy = llvm::IntegerType::get(CGF.getLLVMContext(),
                                                SizeInBits)->getPointerTo();
    Val = CGF.EmitLoadOfScalar(CGF.Builder.CreateBitCast(Val, IPtrTy), false,
                               Align, CGF.getContext().getPointerType(ValTy),
                               Loc);
    // Coerce the value into an appropriately sized integer type.
    Args.add(RValue::get(Val), ValTy);
  } else {
    // Non-optimized functions always take a reference.
    Args.add(RValue::get(CGF.EmitCastToVoidPtr(Val)),
             CGF.getContext().VoidPtrTy);
  }
}

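/// Emit a C11 _Atomic or GNU __atomic builtin, lowering it either to a call
/// into the __atomic_* runtime library or to native atomic instructions,
/// depending on the operation's size and alignment.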
RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) {
  QualType AtomicTy = E->getPtr()->getType()->getPointeeType();
  QualType MemTy = AtomicTy;
  if (const AtomicType *AT = AtomicTy->getAs<AtomicType>())
    MemTy = AT->getValueType();
  CharUnits sizeChars = getContext().getTypeSizeInChars(AtomicTy);
  uint64_t Size = sizeChars.getQuantity();
  CharUnits alignChars = getContext().getTypeAlignInChars(AtomicTy);
  unsigned Align = alignChars.getQuantity();
  unsigned MaxInlineWidthInBits =
      getTarget().getMaxAtomicInlineWidth();
  bool UseLibcall = (Size != Align ||
                     getContext().toBits(sizeChars) > MaxInlineWidthInBits);

  llvm::Value *IsWeak = nullptr, *OrderFail = nullptr, *Val1 = nullptr,
              *Val2 = nullptr;
  llvm::Value *Ptr = EmitScalarExpr(E->getPtr());

  if (E->getOp() == AtomicExpr::AO__c11_atomic_init) {
    assert(!Dest && "Init does not return a value");
    LValue lvalue = LValue::MakeAddr(Ptr, AtomicTy, alignChars, getContext());
    EmitAtomicInit(E->getVal1(), lvalue);
    return RValue::get(nullptr);
  }

  llvm::Value *Order = EmitScalarExpr(E->getOrder());

  switch (E->getOp()) {
  case AtomicExpr::AO__c11_atomic_init:
    llvm_unreachable("Already handled!");

  case AtomicExpr::AO__c11_atomic_load:
  case AtomicExpr::AO__atomic_load_n:
    break;

  case AtomicExpr::AO__atomic_load:
    Dest = EmitScalarExpr(E->getVal1());
    break;

  case AtomicExpr::AO__atomic_store:
    Val1 = EmitScalarExpr(E->getVal1());
    break;

  case AtomicExpr::AO__atomic_exchange:
    Val1 = EmitScalarExpr(E->getVal1());
    Dest = EmitScalarExpr(E->getVal2());
    break;

  case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
  case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
  case AtomicExpr::AO__atomic_compare_exchange_n:
  case AtomicExpr::AO__atomic_compare_exchange:
    Val1 = EmitScalarExpr(E->getVal1());
    if (E->getOp() == AtomicExpr::AO__atomic_compare_exchange)
      Val2 = EmitScalarExpr(E->getVal2());
    else
      Val2 = EmitValToTemp(*this, E->getVal2());
    OrderFail = EmitScalarExpr(E->getOrderFail());
    if (E->getNumSubExprs() == 6)
      IsWeak = EmitScalarExpr(E->getWeak());
    break;

  case AtomicExpr::AO__c11_atomic_fetch_add:
  case AtomicExpr::AO__c11_atomic_fetch_sub:
    if (MemTy->isPointerType()) {
      // For pointer arithmetic, we're required to do a bit of math:
      // adding 1 to an int* is not the same as adding 1 to a uintptr_t.
      // ... but only for the C11 builtins. The GNU builtins expect the
      // user to multiply by sizeof(T).
      QualType Val1Ty = E->getVal1()->getType();
      llvm::Value *Val1Scalar = EmitScalarExpr(E->getVal1());
      CharUnits PointeeIncAmt =
          getContext().getTypeSizeInChars(MemTy->getPointeeType());
      Val1Scalar = Builder.CreateMul(Val1Scalar, CGM.getSize(PointeeIncAmt));
      Val1 = CreateMemTemp(Val1Ty, ".atomictmp");
      EmitStoreOfScalar(Val1Scalar, MakeAddrLValue(Val1, Val1Ty));
      break;
    }
    // Fall through.
  case AtomicExpr::AO__atomic_fetch_add:
  case AtomicExpr::AO__atomic_fetch_sub:
  case AtomicExpr::AO__atomic_add_fetch:
  case AtomicExpr::AO__atomic_sub_fetch:
  case AtomicExpr::AO__c11_atomic_store:
  case AtomicExpr::AO__c11_atomic_exchange:
  case AtomicExpr::AO__atomic_store_n:
  case AtomicExpr::AO__atomic_exchange_n:
  case AtomicExpr::AO__c11_atomic_fetch_and:
  case AtomicExpr::AO__c11_atomic_fetch_or:
  case AtomicExpr::AO__c11_atomic_fetch_xor:
  case AtomicExpr::AO__atomic_fetch_and:
  case AtomicExpr::AO__atomic_fetch_or:
  case AtomicExpr::AO__atomic_fetch_xor:
  case AtomicExpr::AO__atomic_fetch_nand:
  case AtomicExpr::AO__atomic_and_fetch:
  case AtomicExpr::AO__atomic_or_fetch:
  case AtomicExpr::AO__atomic_xor_fetch:
  case AtomicExpr::AO__atomic_nand_fetch:
    Val1 = EmitValToTemp(*this, E->getVal1());
    break;
  }

  QualType RValTy = E->getType().getUnqualifiedType();

  auto GetDest = [&] {
    if (!RValTy->isVoidType() && !Dest) {
      Dest = CreateMemTemp(RValTy, ".atomicdst");
    }
    return Dest;
  };

  // Use a library call.  See: http://gcc.gnu.org/wiki/Atomic/GCCMM/LIbrary .
  if (UseLibcall) {
    bool UseOptimizedLibcall = false;
    switch (E->getOp()) {
    case AtomicExpr::AO__c11_atomic_fetch_add:
    case AtomicExpr::AO__atomic_fetch_add:
    case AtomicExpr::AO__c11_atomic_fetch_and:
    case AtomicExpr::AO__atomic_fetch_and:
    case AtomicExpr::AO__c11_atomic_fetch_or:
    case AtomicExpr::AO__atomic_fetch_or:
    case AtomicExpr::AO__c11_atomic_fetch_sub:
    case AtomicExpr::AO__atomic_fetch_sub:
    case AtomicExpr::AO__c11_atomic_fetch_xor:
    case AtomicExpr::AO__atomic_fetch_xor:
      // For these, only library calls for certain sizes exist.
      UseOptimizedLibcall = true;
      break;
    default:
      // Only use optimized library calls for sizes for which they exist.
      if (Size == 1 || Size == 2 || Size == 4 || Size == 8)
        UseOptimizedLibcall = true;
      break;
    }

    CallArgList Args;
    if (!UseOptimizedLibcall) {
      // For non-optimized library calls, the size is the first parameter
      Args.add(RValue::get(llvm::ConstantInt::get(SizeTy, Size)),
               getContext().getSizeType());
    }
    // Atomic address is the first or second parameter
    Args.add(RValue::get(EmitCastToVoidPtr(Ptr)), getContext().VoidPtrTy);

    std::string LibCallName;
    QualType LoweredMemTy =
      MemTy->isPointerType() ? getContext().getIntPtrType() : MemTy;
    QualType RetTy;
    bool HaveRetTy = false;
    switch (E->getOp()) {
    // There is only one libcall for compare and exchange, because there is no
    // optimisation benefit possible from a libcall version of a weak compare
    // and exchange.
    // bool __atomic_compare_exchange(size_t size, void *mem, void *expected,
    //                                void *desired, int success, int failure)
    // bool __atomic_compare_exchange_N(T *mem, T *expected, T desired,
    //                                  int success, int failure)
    case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
    case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
    case AtomicExpr::AO__atomic_compare_exchange:
    case AtomicExpr::AO__atomic_compare_exchange_n:
      LibCallName = "__atomic_compare_exchange";
      RetTy = getContext().BoolTy;
      HaveRetTy = true;
      Args.add(RValue::get(EmitCastToVoidPtr(Val1)), getContext().VoidPtrTy);
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val2, MemTy,
                        E->getExprLoc(), sizeChars);
      Args.add(RValue::get(Order), getContext().IntTy);
      Order = OrderFail;
      break;
    // void __atomic_exchange(size_t size, void *mem, void *val, void *return,
    //                        int order)
    // T __atomic_exchange_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_exchange:
    case AtomicExpr::AO__atomic_exchange_n:
    case AtomicExpr::AO__atomic_exchange:
      LibCallName = "__atomic_exchange";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
                        E->getExprLoc(), sizeChars);
      break;
    // void __atomic_store(size_t size, void *mem, void *val, int order)
    // void __atomic_store_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_store:
    case AtomicExpr::AO__atomic_store:
    case AtomicExpr::AO__atomic_store_n:
      LibCallName = "__atomic_store";
      RetTy = getContext().VoidTy;
      HaveRetTy = true;
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
                        E->getExprLoc(), sizeChars);
      break;
    // void __atomic_load(size_t size, void *mem, void *return, int order)
    // T __atomic_load_N(T *mem, int order)
    case AtomicExpr::AO__c11_atomic_load:
    case AtomicExpr::AO__atomic_load:
    case AtomicExpr::AO__atomic_load_n:
      LibCallName = "__atomic_load";
      break;
    // T __atomic_fetch_add_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_fetch_add:
    case AtomicExpr::AO__atomic_fetch_add:
      LibCallName = "__atomic_fetch_add";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, LoweredMemTy,
                        E->getExprLoc(), sizeChars);
      break;
    // T __atomic_fetch_and_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_fetch_and:
    case AtomicExpr::AO__atomic_fetch_and:
      LibCallName = "__atomic_fetch_and";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
                        E->getExprLoc(), sizeChars);
      break;
    // T __atomic_fetch_or_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_fetch_or:
    case AtomicExpr::AO__atomic_fetch_or:
      LibCallName = "__atomic_fetch_or";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
                        E->getExprLoc(), sizeChars);
      break;
    // T __atomic_fetch_sub_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_fetch_sub:
    case AtomicExpr::AO__atomic_fetch_sub:
      LibCallName = "__atomic_fetch_sub";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, LoweredMemTy,
                        E->getExprLoc(), sizeChars);
      break;
    // T __atomic_fetch_xor_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_fetch_xor:
    case AtomicExpr::AO__atomic_fetch_xor:
      LibCallName = "__atomic_fetch_xor";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
                        E->getExprLoc(), sizeChars);
      break;
    default: return EmitUnsupportedRValue(E, "atomic library call");
    }

    // Optimized functions have the size in their name.
    if (UseOptimizedLibcall)
      LibCallName += "_" + llvm::utostr(Size);
    // By default, assume we return a value of the atomic type.
    if (!HaveRetTy) {
      if (UseOptimizedLibcall) {
        // Value is returned directly.
        // The function returns an appropriately sized integer type.
        RetTy = getContext().getIntTypeForBitwidth(
            getContext().toBits(sizeChars), /*Signed=*/false);
      } else {
        // Value is returned through parameter before the order.
        RetTy = getContext().VoidTy;
        Args.add(RValue::get(EmitCastToVoidPtr(Dest)), getContext().VoidPtrTy);
      }
    }
    // order is always the last parameter
    Args.add(RValue::get(Order),
             getContext().IntTy);

    RValue Res = emitAtomicLibcall(*this, LibCallName, RetTy, Args);
    // The value is returned directly from the libcall.
    if (HaveRetTy && !RetTy->isVoidType())
      return Res;
    // The value is returned via an explicit out param.
    if (RetTy->isVoidType())
      return RValue::get(nullptr);
    // The value is returned directly for optimized libcalls but the caller
    // expects an out-param.
    if (UseOptimizedLibcall) {
      llvm::Value *ResVal = Res.getScalarVal();
      llvm::StoreInst *StoreDest = Builder.CreateStore(
          ResVal,
          Builder.CreateBitCast(GetDest(), ResVal->getType()->getPointerTo()));
      StoreDest->setAlignment(Align);
    }
    return convertTempToRValue(Dest, RValTy, E->getExprLoc());
  }

  bool IsStore = E->getOp() == AtomicExpr::AO__c11_atomic_store ||
                 E->getOp() == AtomicExpr::AO__atomic_store ||
                 E->getOp() == AtomicExpr::AO__atomic_store_n;
  bool IsLoad = E->getOp() == AtomicExpr::AO__c11_atomic_load ||
                E->getOp() == AtomicExpr::AO__atomic_load ||
                E->getOp() == AtomicExpr::AO__atomic_load_n;

  llvm::Type *ITy =
      llvm::IntegerType::get(getLLVMContext(), Size * 8);
  llvm::Value *OrigDest = GetDest();
  Ptr = Builder.CreateBitCast(
      Ptr, ITy->getPointerTo(Ptr->getType()->getPointerAddressSpace()));
  if (Val1) Val1 = Builder.CreateBitCast(Val1, ITy->getPointerTo());
  if (Val2) Val2 = Builder.CreateBitCast(Val2, ITy->getPointerTo());
  if (Dest && !E->isCmpXChg())
    Dest = Builder.CreateBitCast(Dest, ITy->getPointerTo());

  if (isa<llvm::ConstantInt>(Order)) {
    int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
    switch (ord) {
    case AtomicExpr::AO_ABI_memory_order_relaxed:
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
                   Size, Align, llvm::Monotonic);
      break;
    case AtomicExpr::AO_ABI_memory_order_consume:
    case AtomicExpr::AO_ABI_memory_order_acquire:
      if (IsStore)
        break; // Avoid crashing on code with undefined behavior
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
                   Size, Align, llvm::Acquire);
      break;
    case AtomicExpr::AO_ABI_memory_order_release:
      if (IsLoad)
        break; // Avoid crashing on code with undefined behavior
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
                   Size, Align, llvm::Release);
      break;
    case AtomicExpr::AO_ABI_memory_order_acq_rel:
      if (IsLoad || IsStore)
        break; // Avoid crashing on code with undefined behavior
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
                   Size, Align, llvm::AcquireRelease);
      break;
    case AtomicExpr::AO_ABI_memory_order_seq_cst:
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
                   Size, Align, llvm::SequentiallyConsistent);
      break;
    default: // invalid order
      // We should not ever get here normally, but it's hard to
      // enforce that in general.
      break;
    }
    if (RValTy->isVoidType())
      return RValue::get(nullptr);
    return convertTempToRValue(OrigDest, RValTy, E->getExprLoc());
  }

  // Long case, when Order isn't obviously constant.

  // Create all the relevant BB's
  llvm::BasicBlock *MonotonicBB = nullptr, *AcquireBB = nullptr,
                   *ReleaseBB = nullptr, *AcqRelBB = nullptr,
                   *SeqCstBB = nullptr;
  MonotonicBB = createBasicBlock("monotonic", CurFn);
  if (!IsStore)
    AcquireBB = createBasicBlock("acquire", CurFn);
  if (!IsLoad)
    ReleaseBB = createBasicBlock("release", CurFn);
  if (!IsLoad && !IsStore)
    AcqRelBB = createBasicBlock("acqrel", CurFn);
  SeqCstBB = createBasicBlock("seqcst", CurFn);
  llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);

  // Create the switch for the split
  // MonotonicBB is arbitrarily chosen as the default case; in practice, this
  // doesn't matter unless someone is crazy enough to use something that
  // doesn't fold to a constant for the ordering.
  Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
  llvm::SwitchInst *SI = Builder.CreateSwitch(Order, MonotonicBB);

  // Emit all the different atomics
  Builder.SetInsertPoint(MonotonicBB);
  EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
               Size, Align, llvm::Monotonic);
  Builder.CreateBr(ContBB);
  if (!IsStore) {
    Builder.SetInsertPoint(AcquireBB);
    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
                 Size, Align, llvm::Acquire);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32(AtomicExpr::AO_ABI_memory_order_consume),
                AcquireBB);
    SI->addCase(Builder.getInt32(AtomicExpr::AO_ABI_memory_order_acquire),
                AcquireBB);
  }
  if (!IsLoad) {
    Builder.SetInsertPoint(ReleaseBB);
    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
                 Size, Align, llvm::Release);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32(AtomicExpr::AO_ABI_memory_order_release),
                ReleaseBB);
  }
  if (!IsLoad && !IsStore) {
    Builder.SetInsertPoint(AcqRelBB);
    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
                 Size, Align, llvm::AcquireRelease);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32(AtomicExpr::AO_ABI_memory_order_acq_rel),
                AcqRelBB);
  }
  Builder.SetInsertPoint(SeqCstBB);
  EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
               Size, Align, llvm::SequentiallyConsistent);
  Builder.CreateBr(ContBB);
  SI->addCase(Builder.getInt32(AtomicExpr::AO_ABI_memory_order_seq_cst),
              SeqCstBB);

  // Cleanup and return
  Builder.SetInsertPoint(ContBB);
  if (RValTy->isVoidType())
    return RValue::get(nullptr);
  return convertTempToRValue(OrigDest, RValTy, E->getExprLoc());
}

llvm::Value *AtomicInfo::emitCastToAtomicIntPointer(llvm::Value *addr) const {
  unsigned addrspace =
    cast<llvm::PointerType>(addr->getType())->getAddressSpace();
  llvm::IntegerType *ty =
    llvm::IntegerType::get(CGF.getLLVMContext(), AtomicSizeInBits);
  return CGF.Builder.CreateBitCast(addr, ty->getPointerTo(addrspace));
}

RValue AtomicInfo::convertTempToRValue(llvm::Value *addr,
                                       AggValueSlot resultSlot,
                                       SourceLocation loc) const {
  if (LVal.isSimple()) {
    if (EvaluationKind == TEK_Aggregate)
      return resultSlot.asRValue();

    // Drill into the padding structure if we have one.
    if (hasPadding())
      addr = CGF.Builder.CreateStructGEP(addr, 0);

    // Otherwise, just convert the temporary to an r-value using the
    // normal conversion routine.
    return CGF.convertTempToRValue(addr, getValueType(), loc);
  } else if (LVal.isBitField())
    return CGF.EmitLoadOfBitfieldLValue(LValue::MakeBitfield(
        addr, LVal.getBitFieldInfo(), LVal.getType(), LVal.getAlignment()));
  else if (LVal.isVectorElt())
    return CGF.EmitLoadOfLValue(LValue::MakeVectorElt(addr, LVal.getVectorIdx(),
                                                      LVal.getType(),
                                                      LVal.getAlignment()),
                                loc);
  assert(LVal.isExtVectorElt());
  return CGF.EmitLoadOfExtVectorElementLValue(LValue::MakeExtVectorElt(
      addr, LVal.getExtVectorElts(), LVal.getType(), LVal.getAlignment()));
}

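/// Turn an integer produced by an atomic operation back into a value of the
/// underlying value type, reusing the result slot or a temporary when the
/// integer cannot simply be cast to that type.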
RValue AtomicInfo::convertIntToValue(llvm::Value *IntVal,
                                     AggValueSlot ResultSlot,
                                     SourceLocation Loc) const {
  assert(LVal.isSimple());
  // Try to avoid going through memory in some easy cases.
  assert(IntVal->getType()->isIntegerTy() && "Expected integer value");
  if (getEvaluationKind() == TEK_Scalar && !hasPadding()) {
    auto *ValTy = CGF.ConvertTypeForMem(ValueTy);
    if (ValTy->isIntegerTy()) {
      assert(IntVal->getType() == ValTy && "Different integer types.");
      return RValue::get(CGF.EmitFromMemory(IntVal, ValueTy));
    } else if (ValTy->isPointerTy())
      return RValue::get(CGF.Builder.CreateIntToPtr(IntVal, ValTy));
    else if (llvm::CastInst::isBitCastable(IntVal->getType(), ValTy))
      return RValue::get(CGF.Builder.CreateBitCast(IntVal, ValTy));
  }

  // Create a temporary.  This needs to be big enough to hold the
  // atomic integer.
  llvm::Value *Temp;
  bool TempIsVolatile = false;
  CharUnits TempAlignment;
  if (getEvaluationKind() == TEK_Aggregate) {
    assert(!ResultSlot.isIgnored());
    Temp = ResultSlot.getAddr();
    TempAlignment = getValueAlignment();
    TempIsVolatile = ResultSlot.isVolatile();
  } else {
    Temp = CGF.CreateMemTemp(getAtomicType(), "atomic-temp");
    TempAlignment = getAtomicAlignment();
  }

  // Slam the integer into the temporary.
  llvm::Value *CastTemp = emitCastToAtomicIntPointer(Temp);
  CGF.Builder.CreateAlignedStore(IntVal, CastTemp, TempAlignment.getQuantity())
      ->setVolatile(TempIsVolatile);

  return convertTempToRValue(Temp, ResultSlot, Loc);
}

/// An LValue is a candidate for having its loads and stores be made atomic if
/// we are operating under /volatile:ms *and* the LValue itself is volatile and
/// such an operation can be performed without a libcall.
bool CodeGenFunction::LValueIsSuitableForInlineAtomic(LValue LV) {
  AtomicInfo AI(*this, LV);
  bool IsVolatile = LV.isVolatile() || hasVolatileMember(LV.getType());
  // An atomic is inline if we don't need to use a libcall.
  bool AtomicIsInline = !AI.shouldUseLibcall();
  return CGM.getCodeGenOpts().MSVolatile && IsVolatile && AtomicIsInline;
}

/// A type is a candidate for having its loads and stores be made atomic if
/// we are operating under /volatile:ms *and* we know the access is volatile
/// and such an operation can be performed without a libcall.
bool CodeGenFunction::typeIsSuitableForInlineAtomic(QualType Ty,
                                                    bool IsVolatile) const {
  // An atomic is inline if we don't need to use a libcall (e.g. it is builtin).
  bool AtomicIsInline = getContext().getTargetInfo().hasBuiltinAtomic(
      getContext().getTypeSize(Ty), getContext().getTypeAlign(Ty));
  return CGM.getCodeGenOpts().MSVolatile && IsVolatile && AtomicIsInline;
}

RValue CodeGenFunction::EmitAtomicLoad(LValue LV, SourceLocation SL,
                                       AggValueSlot Slot) {
  llvm::AtomicOrdering AO;
  bool IsVolatile = LV.isVolatileQualified();
  if (LV.getType()->isAtomicType()) {
    AO = llvm::SequentiallyConsistent;
  } else {
    AO = llvm::Acquire;
    IsVolatile = true;
  }
  return EmitAtomicLoad(LV, SL, AO, IsVolatile, Slot);
}

/// Emit a load from an l-value of atomic type.  Note that the r-value
/// we produce is an r-value of the atomic *value* type.
RValue CodeGenFunction::EmitAtomicLoad(LValue src, SourceLocation loc,
                                       llvm::AtomicOrdering AO, bool IsVolatile,
                                       AggValueSlot resultSlot) {
  AtomicInfo atomics(*this, src);
  LValue LVal = atomics.getAtomicLValue();
  llvm::Value *SrcAddr = nullptr;
  llvm::AllocaInst *NonSimpleTempAlloca = nullptr;
  if (LVal.isSimple())
    SrcAddr = LVal.getAddress();
  else {
    if (LVal.isBitField())
      SrcAddr = LVal.getBitFieldAddr();
    else if (LVal.isVectorElt())
      SrcAddr = LVal.getVectorAddr();
    else {
      assert(LVal.isExtVectorElt());
      SrcAddr = LVal.getExtVectorAddr();
    }
    NonSimpleTempAlloca = CreateTempAlloca(
        SrcAddr->getType()->getPointerElementType(), "atomic-load-temp");
    NonSimpleTempAlloca->setAlignment(getContext().toBits(src.getAlignment()));
  }

  // Check whether we should use a library call.
  if (atomics.shouldUseLibcall()) {
    llvm::Value *tempAddr;
    if (LVal.isSimple()) {
      if (!resultSlot.isIgnored()) {
        assert(atomics.getEvaluationKind() == TEK_Aggregate);
        tempAddr = resultSlot.getAddr();
      } else
        tempAddr = CreateMemTemp(atomics.getAtomicType(), "atomic-load-temp");
    } else
      tempAddr = NonSimpleTempAlloca;

    // void __atomic_load(size_t size, void *mem, void *return, int order);
    CallArgList args;
    args.add(RValue::get(atomics.getAtomicSizeValue()),
             getContext().getSizeType());
    args.add(RValue::get(EmitCastToVoidPtr(SrcAddr)), getContext().VoidPtrTy);
    args.add(RValue::get(EmitCastToVoidPtr(tempAddr)), getContext().VoidPtrTy);
    args.add(RValue::get(llvm::ConstantInt::get(
                 IntTy, AtomicExpr::AO_ABI_memory_order_seq_cst)),
             getContext().IntTy);
    emitAtomicLibcall(*this, "__atomic_load", getContext().VoidTy, args);

    // Produce the r-value.
    return atomics.convertTempToRValue(tempAddr, resultSlot, loc);
  }

  // Okay, we're doing this natively.
  llvm::Value *addr = atomics.emitCastToAtomicIntPointer(SrcAddr);
  llvm::LoadInst *load = Builder.CreateLoad(addr, "atomic-load");
  load->setAtomic(AO);

  // Other decoration.
  load->setAlignment(src.getAlignment().getQuantity());
  if (IsVolatile)
    load->setVolatile(true);
  if (src.getTBAAInfo())
    CGM.DecorateInstruction(load, src.getTBAAInfo());

  // If we're ignoring an aggregate return, don't do anything.
  if (atomics.getEvaluationKind() == TEK_Aggregate && resultSlot.isIgnored())
    return RValue::getAggregate(nullptr, false);

  // Okay, turn that back into the original value type.
  if (src.isSimple())
    return atomics.convertIntToValue(load, resultSlot, loc);

  auto *IntAddr = atomics.emitCastToAtomicIntPointer(NonSimpleTempAlloca);
  Builder.CreateAlignedStore(load, IntAddr, src.getAlignment().getQuantity());
  return atomics.convertTempToRValue(NonSimpleTempAlloca, resultSlot, loc);
}
1120
1121
1122
1123/// Copy an r-value into memory as part of storing to an atomic type.
1124/// This needs to create a bit-pattern suitable for atomic operations.
Stephen Hines0e2c34f2015-03-23 12:09:02 -07001125void AtomicInfo::emitCopyIntoMemory(RValue rvalue) const {
1126 assert(LVal.isSimple());
John McCall9eda3ab2013-03-07 21:37:17 +00001127 // If we have an r-value, the rvalue should be of the atomic type,
1128 // which means that the caller is responsible for having zeroed
1129 // any padding. Just do an aggregate copy of that type.
1130 if (rvalue.isAggregate()) {
Stephen Hines0e2c34f2015-03-23 12:09:02 -07001131 CGF.EmitAggregateCopy(LVal.getAddress(),
John McCall9eda3ab2013-03-07 21:37:17 +00001132 rvalue.getAggregateAddr(),
1133 getAtomicType(),
1134 (rvalue.isVolatileQualified()
Stephen Hines0e2c34f2015-03-23 12:09:02 -07001135 || LVal.isVolatileQualified()),
1136 LVal.getAlignment());
John McCall9eda3ab2013-03-07 21:37:17 +00001137 return;
1138 }
1139
1140 // Okay, otherwise we're copying stuff.
1141
1142 // Zero out the buffer if necessary.
Stephen Hines0e2c34f2015-03-23 12:09:02 -07001143 emitMemSetZeroIfNecessary();
John McCall9eda3ab2013-03-07 21:37:17 +00001144
1145 // Drill past the padding if present.
Stephen Hines0e2c34f2015-03-23 12:09:02 -07001146 LValue TempLVal = projectValue();
John McCall9eda3ab2013-03-07 21:37:17 +00001147
1148 // Okay, store the rvalue in.
1149 if (rvalue.isScalar()) {
Stephen Hines0e2c34f2015-03-23 12:09:02 -07001150 CGF.EmitStoreOfScalar(rvalue.getScalarVal(), TempLVal, /*init*/ true);
John McCall9eda3ab2013-03-07 21:37:17 +00001151 } else {
Stephen Hines0e2c34f2015-03-23 12:09:02 -07001152 CGF.EmitStoreOfComplex(rvalue.getComplexVal(), TempLVal, /*init*/ true);
John McCall9eda3ab2013-03-07 21:37:17 +00001153 }
1154}
1155
1156
1157/// Materialize an r-value into memory for the purposes of storing it
1158/// to an atomic type.
1159llvm::Value *AtomicInfo::materializeRValue(RValue rvalue) const {
1160 // Aggregate r-values are already in memory, and EmitAtomicStore
1161 // requires them to be values of the atomic type.
1162 if (rvalue.isAggregate())
1163 return rvalue.getAggregateAddr();
1164
1165 // Otherwise, make a temporary and materialize into it.
1166 llvm::Value *temp = CGF.CreateMemTemp(getAtomicType(), "atomic-store-temp");
Stephen Hines0e2c34f2015-03-23 12:09:02 -07001167 LValue tempLV =
1168 CGF.MakeAddrLValue(temp, getAtomicType(), getAtomicAlignment());
1169 AtomicInfo Atomics(CGF, tempLV);
1170 Atomics.emitCopyIntoMemory(rvalue);
John McCall9eda3ab2013-03-07 21:37:17 +00001171 return temp;
1172}
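
// Assumed example of when the temporary above is needed: a value that is not
// a single scalar (here a complex number) cannot take the register-only path
// in convertRValueToInt below, so it is first copied into an
// "atomic-store-temp" alloca of the full atomic type.
//
//   _Atomic(_Complex float) c;
//   void set(_Complex float v) { c = v; }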
1173
Stephen Hines0e2c34f2015-03-23 12:09:02 -07001174llvm::Value *AtomicInfo::convertRValueToInt(RValue RVal) const {
1175 // If we've got a scalar value of the right size, try to avoid going
1176 // through memory.
1177 if (RVal.isScalar() && !hasPadding()) {
1178 llvm::Value *Value = RVal.getScalarVal();
1179 if (isa<llvm::IntegerType>(Value->getType()))
1180 return Value;
1181 else {
1182 llvm::IntegerType *InputIntTy =
1183 llvm::IntegerType::get(CGF.getLLVMContext(), getValueSizeInBits());
1184 if (isa<llvm::PointerType>(Value->getType()))
1185 return CGF.Builder.CreatePtrToInt(Value, InputIntTy);
1186 else if (llvm::BitCastInst::isBitCastable(Value->getType(), InputIntTy))
1187 return CGF.Builder.CreateBitCast(Value, InputIntTy);
1188 }
1189 }
1190 // Otherwise, we need to go through memory.
1191 // Put the r-value in memory.
1192 llvm::Value *Addr = materializeRValue(RVal);
1193
1194 // Cast the temporary to the atomic int type and pull a value out.
1195 Addr = emitCastToAtomicIntPointer(Addr);
1196 return CGF.Builder.CreateAlignedLoad(Addr,
1197 getAtomicAlignment().getQuantity());
1198}
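
// Sketch of the scalar fast path above (assumed example, not from the
// original source): a padding-free float is bitcast straight to an integer of
// the same width instead of being spilled to memory.
//
//   _Atomic float f;
//   void set(float v) { f = v; }
//
//   ; roughly:
//   %1 = bitcast float %v to i32
//   store atomic i32 %1, i32* @f seq_cst, align 4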
1199
1200void CodeGenFunction::EmitAtomicStore(RValue rvalue, LValue lvalue,
1201 bool isInit) {
1202 bool IsVolatile = lvalue.isVolatileQualified();
1203 llvm::AtomicOrdering AO;
1204 if (lvalue.getType()->isAtomicType()) {
1205 AO = llvm::SequentiallyConsistent;
1206 } else {
1207 AO = llvm::Release;
1208 IsVolatile = true;
1209 }
1210 return EmitAtomicStore(rvalue, lvalue, AO, IsVolatile, isInit);
1211}
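
// Illustrative mapping for the wrapper above (assumed example): a store to an
// l-value of genuine _Atomic type defaults to a sequentially consistent,
// non-volatile store; the release + volatile fallback is only reached for
// l-values that are not of atomic type, and which callers take that path
// depends on front-end configuration.
//
//   _Atomic int a;
//   void f(void) { a = 1; }   // atomic type -> seq_cst store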
1212
John McCall9eda3ab2013-03-07 21:37:17 +00001213/// Emit a store to an l-value of atomic type.
1214///
1215/// Note that the r-value is expected to be an r-value *of the atomic
1216/// type*; this means that for aggregate r-values, it should include
1217/// storage for any padding that was necessary.
Stephen Hines0e2c34f2015-03-23 12:09:02 -07001218void CodeGenFunction::EmitAtomicStore(RValue rvalue, LValue dest,
1219 llvm::AtomicOrdering AO, bool IsVolatile,
1220 bool isInit) {
John McCall9eda3ab2013-03-07 21:37:17 +00001221 // If this is an aggregate r-value, it should agree in type except
1222 // maybe for address-space qualification.
1223 assert(!rvalue.isAggregate() ||
1224 rvalue.getAggregateAddr()->getType()->getPointerElementType()
1225 == dest.getAddress()->getType()->getPointerElementType());
1226
1227 AtomicInfo atomics(*this, dest);
1228
1229 // If this is an initialization, just put the value there normally.
1230 if (isInit) {
Stephen Hines0e2c34f2015-03-23 12:09:02 -07001231 atomics.emitCopyIntoMemory(rvalue);
John McCall9eda3ab2013-03-07 21:37:17 +00001232 return;
1233 }
1234
1235 // Check whether we should use a library call.
1236 if (atomics.shouldUseLibcall()) {
1237 // Produce a source address.
1238 llvm::Value *srcAddr = atomics.materializeRValue(rvalue);
1239
1240 // void __atomic_store(size_t size, void *mem, void *val, int order)
1241 CallArgList args;
1242 args.add(RValue::get(atomics.getAtomicSizeValue()),
1243 getContext().getSizeType());
1244 args.add(RValue::get(EmitCastToVoidPtr(dest.getAddress())),
1245 getContext().VoidPtrTy);
1246 args.add(RValue::get(EmitCastToVoidPtr(srcAddr)),
1247 getContext().VoidPtrTy);
Stephen Hines651f13c2014-04-23 16:59:28 -07001248 args.add(RValue::get(llvm::ConstantInt::get(
1249 IntTy, AtomicExpr::AO_ABI_memory_order_seq_cst)),
John McCall9eda3ab2013-03-07 21:37:17 +00001250 getContext().IntTy);
1251 emitAtomicLibcall(*this, "__atomic_store", getContext().VoidTy, args);
1252 return;
1253 }
1254
1255 // Okay, we're doing this natively.
Stephen Hines0e2c34f2015-03-23 12:09:02 -07001256 llvm::Value *intValue = atomics.convertRValueToInt(rvalue);
John McCall9eda3ab2013-03-07 21:37:17 +00001257
1258 // Do the atomic store.
1259 llvm::Value *addr = atomics.emitCastToAtomicIntPointer(dest.getAddress());
1260 llvm::StoreInst *store = Builder.CreateStore(intValue, addr);
1261
1262 // Initializations don't need to be atomic.
Stephen Hines0e2c34f2015-03-23 12:09:02 -07001263 if (!isInit) store->setAtomic(AO);
John McCall9eda3ab2013-03-07 21:37:17 +00001264
1265 // Other decoration.
1266 store->setAlignment(dest.getAlignment().getQuantity());
Stephen Hines0e2c34f2015-03-23 12:09:02 -07001267 if (IsVolatile)
John McCall9eda3ab2013-03-07 21:37:17 +00001268 store->setVolatile(true);
1269 if (dest.getTBAAInfo())
1270 CGM.DecorateInstruction(store, dest.getTBAAInfo());
1271}
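
// End-to-end sketch of the two store paths above (assumed example, not part
// of the original source). A lock-free width takes the native path; an
// over-wide aggregate takes the __atomic_store libcall path:
//
//   _Atomic int i;
//   void store_i(int v) { i = v; }
//   // roughly: store atomic i32 %v, i32* @i seq_cst, align 4
//
//   struct Big { char b[32]; };
//   _Atomic struct Big big;
//   void store_big(struct Big v) { big = v; }
//   // roughly: call void @__atomic_store(i64 32, i8* %obj, i8* %tmp, i32 5)
//   // where 5 is the hard-coded AtomicExpr::AO_ABI_memory_order_seq_cst value.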
1272
Stephen Hines0e2c34f2015-03-23 12:09:02 -07001273/// Emit a compare-and-exchange operation for an atomic type.
1274///
1275std::pair<RValue, RValue> CodeGenFunction::EmitAtomicCompareExchange(
1276 LValue Obj, RValue Expected, RValue Desired, SourceLocation Loc,
1277 llvm::AtomicOrdering Success, llvm::AtomicOrdering Failure, bool IsWeak,
1278 AggValueSlot Slot) {
1279 // If this is an aggregate r-value, it should agree in type except
1280 // maybe for address-space qualification.
1281 assert(!Expected.isAggregate() ||
1282 Expected.getAggregateAddr()->getType()->getPointerElementType() ==
1283 Obj.getAddress()->getType()->getPointerElementType());
1284 assert(!Desired.isAggregate() ||
1285 Desired.getAggregateAddr()->getType()->getPointerElementType() ==
1286 Obj.getAddress()->getType()->getPointerElementType());
1287 AtomicInfo Atomics(*this, Obj);
1288
1289 if (Failure >= Success)
1290 // Don't assert on undefined behavior.
1291 Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(Success);
1292
1293 auto Alignment = Atomics.getValueAlignment();
1294 // Check whether we should use a library call.
1295 if (Atomics.shouldUseLibcall()) {
1296    // Produce source addresses for the expected and desired values.
1297    auto *ExpectedAddr = Atomics.materializeRValue(Expected);
1298    auto *DesiredAddr = Atomics.materializeRValue(Desired);
1299 // bool __atomic_compare_exchange(size_t size, void *obj, void *expected,
1300 // void *desired, int success, int failure);
1301 CallArgList Args;
1302 Args.add(RValue::get(Atomics.getAtomicSizeValue()),
1303 getContext().getSizeType());
1304 Args.add(RValue::get(EmitCastToVoidPtr(Obj.getAddress())),
1305 getContext().VoidPtrTy);
1306 Args.add(RValue::get(EmitCastToVoidPtr(ExpectedAddr)),
1307 getContext().VoidPtrTy);
1308 Args.add(RValue::get(EmitCastToVoidPtr(DesiredAddr)),
1309 getContext().VoidPtrTy);
1310 Args.add(RValue::get(llvm::ConstantInt::get(IntTy, Success)),
1311 getContext().IntTy);
1312 Args.add(RValue::get(llvm::ConstantInt::get(IntTy, Failure)),
1313 getContext().IntTy);
1314 auto SuccessFailureRVal = emitAtomicLibcall(
1315 *this, "__atomic_compare_exchange", getContext().BoolTy, Args);
1316 auto *PreviousVal =
1317 Builder.CreateAlignedLoad(ExpectedAddr, Alignment.getQuantity());
1318 return std::make_pair(RValue::get(PreviousVal), SuccessFailureRVal);
1319 }
1320
1321 // If we've got a scalar value of the right size, try to avoid going
1322 // through memory.
1323 auto *ExpectedIntVal = Atomics.convertRValueToInt(Expected);
1324 auto *DesiredIntVal = Atomics.convertRValueToInt(Desired);
1325
1326  // Do the atomic compare-and-exchange.
1327 auto *Addr = Atomics.emitCastToAtomicIntPointer(Obj.getAddress());
1328 auto *Inst = Builder.CreateAtomicCmpXchg(Addr, ExpectedIntVal, DesiredIntVal,
1329 Success, Failure);
1330 // Other decoration.
1331 Inst->setVolatile(Obj.isVolatileQualified());
1332 Inst->setWeak(IsWeak);
1333
1334 // Okay, turn that back into the original value type.
1335 auto *PreviousVal = Builder.CreateExtractValue(Inst, /*Idxs=*/0);
1336 auto *SuccessFailureVal = Builder.CreateExtractValue(Inst, /*Idxs=*/1);
1337 return std::make_pair(Atomics.convertIntToValue(PreviousVal, Slot, Loc),
1338 RValue::get(SuccessFailureVal));
1339}
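
// Sketch of the result shape above (assumed example, not from the original
// source): on the native path the operation becomes a cmpxchg whose result is
// a pair of the old value and a success flag, e.g. roughly
//
//   %res = cmpxchg i32* %obj, i32 %expected, i32 %desired seq_cst seq_cst
//   %old = extractvalue { i32, i1 } %res, 0
//   %ok  = extractvalue { i32, i1 } %res, 1
//
// and that pair is what is returned here. On the libcall path,
// __atomic_compare_exchange writes the observed value back through the
// 'expected' buffer, which is why the previous value is re-loaded from
// ExpectedAddr.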
1340
John McCall9eda3ab2013-03-07 21:37:17 +00001341void CodeGenFunction::EmitAtomicInit(Expr *init, LValue dest) {
1342 AtomicInfo atomics(*this, dest);
1343
1344 switch (atomics.getEvaluationKind()) {
1345 case TEK_Scalar: {
1346 llvm::Value *value = EmitScalarExpr(init);
Stephen Hines0e2c34f2015-03-23 12:09:02 -07001347 atomics.emitCopyIntoMemory(RValue::get(value));
John McCall9eda3ab2013-03-07 21:37:17 +00001348 return;
1349 }
1350
1351 case TEK_Complex: {
1352 ComplexPairTy value = EmitComplexExpr(init);
Stephen Hines0e2c34f2015-03-23 12:09:02 -07001353 atomics.emitCopyIntoMemory(RValue::getComplex(value));
John McCall9eda3ab2013-03-07 21:37:17 +00001354 return;
1355 }
1356
1357 case TEK_Aggregate: {
Eli Friedman336d9df2013-07-11 01:32:21 +00001358 // Fix up the destination if the initializer isn't an expression
1359 // of atomic type.
1360 bool Zeroed = false;
John McCall9eda3ab2013-03-07 21:37:17 +00001361 if (!init->getType()->isAtomicType()) {
Stephen Hines0e2c34f2015-03-23 12:09:02 -07001362 Zeroed = atomics.emitMemSetZeroIfNecessary();
1363 dest = atomics.projectValue();
John McCall9eda3ab2013-03-07 21:37:17 +00001364 }
1365
1366 // Evaluate the expression directly into the destination.
1367 AggValueSlot slot = AggValueSlot::forLValue(dest,
1368 AggValueSlot::IsNotDestructed,
1369 AggValueSlot::DoesNotNeedGCBarriers,
Eli Friedman336d9df2013-07-11 01:32:21 +00001370 AggValueSlot::IsNotAliased,
1371 Zeroed ? AggValueSlot::IsZeroed :
1372 AggValueSlot::IsNotZeroed);
1373
John McCall9eda3ab2013-03-07 21:37:17 +00001374 EmitAggExpr(init, slot);
1375 return;
1376 }
1377 }
1378 llvm_unreachable("bad evaluation kind");
1379}
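
// Sketch of the aggregate case above (assumed example, not part of the
// original source): when the initializer is not itself of atomic type, any
// padding added by the _Atomic wrapper is zeroed first and the initializer is
// then evaluated directly into the projected payload.
//
//   void init(void) {
//     struct S { char c[3]; };                     // 3 value bytes
//     _Atomic struct S s = { { 'a', 'b', 'c' } };  // padded atomic object
//   }
//
// The init expression has type 'struct S', not an atomic type, so Zeroed is
// expected to become true and EmitAggExpr then fills the projected payload.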