//===--- CGAtomic.cpp - Emit LLVM IR for atomic operations ----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the code for emitting atomic operations.
//
//===----------------------------------------------------------------------===//

#include "CodeGenFunction.h"
#include "CGCall.h"
#include "CGRecordLayout.h"
#include "CodeGenModule.h"
#include "clang/AST/ASTContext.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Operator.h"

using namespace clang;
using namespace CodeGen;

namespace {
  class AtomicInfo {
    CodeGenFunction &CGF;
    QualType AtomicTy;
    QualType ValueTy;
    uint64_t AtomicSizeInBits;
    uint64_t ValueSizeInBits;
    CharUnits AtomicAlign;
    CharUnits ValueAlign;
    CharUnits LValueAlign;
    TypeEvaluationKind EvaluationKind;
    bool UseLibcall;
    LValue LVal;
    CGBitFieldInfo BFI;
  public:
    AtomicInfo(CodeGenFunction &CGF, LValue &lvalue)
        : CGF(CGF), AtomicSizeInBits(0), ValueSizeInBits(0),
          EvaluationKind(TEK_Scalar), UseLibcall(true) {
      assert(!lvalue.isGlobalReg());
      ASTContext &C = CGF.getContext();
      if (lvalue.isSimple()) {
        AtomicTy = lvalue.getType();
        if (auto *ATy = AtomicTy->getAs<AtomicType>())
          ValueTy = ATy->getValueType();
        else
          ValueTy = AtomicTy;
        EvaluationKind = CGF.getEvaluationKind(ValueTy);

        uint64_t ValueAlignInBits;
        uint64_t AtomicAlignInBits;
        TypeInfo ValueTI = C.getTypeInfo(ValueTy);
        ValueSizeInBits = ValueTI.Width;
        ValueAlignInBits = ValueTI.Align;

        TypeInfo AtomicTI = C.getTypeInfo(AtomicTy);
        AtomicSizeInBits = AtomicTI.Width;
        AtomicAlignInBits = AtomicTI.Align;

        assert(ValueSizeInBits <= AtomicSizeInBits);
        assert(ValueAlignInBits <= AtomicAlignInBits);

        AtomicAlign = C.toCharUnitsFromBits(AtomicAlignInBits);
        ValueAlign = C.toCharUnitsFromBits(ValueAlignInBits);
        if (lvalue.getAlignment().isZero())
          lvalue.setAlignment(AtomicAlign);

        LVal = lvalue;
      } else if (lvalue.isBitField()) {
        auto &OrigBFI = lvalue.getBitFieldInfo();
        auto Offset = OrigBFI.Offset % C.toBits(lvalue.getAlignment());
        AtomicSizeInBits = C.toBits(
            C.toCharUnitsFromBits(Offset + OrigBFI.Size + C.getCharWidth() - 1)
                .RoundUpToAlignment(lvalue.getAlignment()));
        auto VoidPtrAddr = CGF.EmitCastToVoidPtr(lvalue.getBitFieldAddr());
        auto OffsetInChars =
            (C.toCharUnitsFromBits(OrigBFI.Offset) / lvalue.getAlignment()) *
            lvalue.getAlignment();
        VoidPtrAddr = CGF.Builder.CreateConstGEP1_64(
            VoidPtrAddr, OffsetInChars.getQuantity());
        auto Addr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
            VoidPtrAddr,
            CGF.Builder.getIntNTy(AtomicSizeInBits)->getPointerTo(),
            "atomic_bitfield_base");
        BFI = OrigBFI;
        BFI.Offset = Offset;
        BFI.StorageSize = AtomicSizeInBits;
        LVal = LValue::MakeBitfield(Addr, BFI, lvalue.getType(),
                                    lvalue.getAlignment());
      } else if (lvalue.isVectorElt()) {
        AtomicSizeInBits = C.getTypeSize(lvalue.getType());
        LVal = lvalue;
      } else {
        assert(lvalue.isExtVectorElt());
        AtomicSizeInBits = C.getTypeSize(lvalue.getType());
        LVal = lvalue;
      }
      UseLibcall = !C.getTargetInfo().hasBuiltinAtomic(
          AtomicSizeInBits, C.toBits(lvalue.getAlignment()));
    }

    QualType getAtomicType() const { return AtomicTy; }
    QualType getValueType() const { return ValueTy; }
    CharUnits getAtomicAlignment() const { return AtomicAlign; }
    CharUnits getValueAlignment() const { return ValueAlign; }
    uint64_t getAtomicSizeInBits() const { return AtomicSizeInBits; }
    uint64_t getValueSizeInBits() const { return ValueSizeInBits; }
    TypeEvaluationKind getEvaluationKind() const { return EvaluationKind; }
    bool shouldUseLibcall() const { return UseLibcall; }
    const LValue &getAtomicLValue() const { return LVal; }

    /// Is the atomic size larger than the underlying value type?
    ///
    /// Note that the absence of padding does not mean that atomic
    /// objects are completely interchangeable with non-atomic
    /// objects: we might have promoted the alignment of a type
    /// without making it bigger.
    bool hasPadding() const {
      return (ValueSizeInBits != AtomicSizeInBits);
    }

    bool emitMemSetZeroIfNecessary() const;

    llvm::Value *getAtomicSizeValue() const {
      CharUnits size = CGF.getContext().toCharUnitsFromBits(AtomicSizeInBits);
      return CGF.CGM.getSize(size);
    }

    /// Cast the given pointer to an integer pointer suitable for
    /// atomic operations.
    llvm::Value *emitCastToAtomicIntPointer(llvm::Value *addr) const;

    /// Turn an atomic-layout object into an r-value.
    RValue convertTempToRValue(llvm::Value *addr,
                               AggValueSlot resultSlot,
                               SourceLocation loc) const;

    /// \brief Converts an r-value to an integer value.
    llvm::Value *convertRValueToInt(RValue RVal) const;

    RValue convertIntToValue(llvm::Value *IntVal, AggValueSlot ResultSlot,
                             SourceLocation Loc) const;

    /// Copy an atomic r-value into atomic-layout memory.
    void emitCopyIntoMemory(RValue rvalue) const;

    /// Project an l-value down to the value field.
    LValue projectValue() const {
      assert(LVal.isSimple());
      llvm::Value *addr = LVal.getAddress();
      if (hasPadding())
        addr = CGF.Builder.CreateStructGEP(addr, 0);

      return LValue::MakeAddr(addr, getValueType(), LVal.getAlignment(),
                              CGF.getContext(), LVal.getTBAAInfo());
    }

    /// Materialize an atomic r-value in atomic-layout memory.
    llvm::Value *materializeRValue(RValue rvalue) const;

  private:
    bool requiresMemSetZero(llvm::Type *type) const;
  };
}

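/// Emit a call to one of the __atomic_* runtime library functions (see
/// http://gcc.gnu.org/wiki/Atomic/GCCMM/LIbrary), creating the function
/// declaration on first use.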
static RValue emitAtomicLibcall(CodeGenFunction &CGF,
                                StringRef fnName,
                                QualType resultType,
                                CallArgList &args) {
  const CGFunctionInfo &fnInfo =
    CGF.CGM.getTypes().arrangeFreeFunctionCall(resultType, args,
            FunctionType::ExtInfo(), RequiredArgs::All);
  llvm::FunctionType *fnTy = CGF.CGM.getTypes().GetFunctionType(fnInfo);
  llvm::Constant *fn = CGF.CGM.CreateRuntimeFunction(fnTy, fnName);
  return CGF.EmitCall(fnInfo, fn, ReturnValueSlot(), args);
}

/// Does a store of the given IR type modify the full expected width?
static bool isFullSizeType(CodeGenModule &CGM, llvm::Type *type,
                           uint64_t expectedSize) {
  return (CGM.getDataLayout().getTypeStoreSize(type) * 8 == expectedSize);
}

/// Does the atomic type require memsetting to zero before initialization?
///
/// The IR type is provided as a way of making certain queries faster.
bool AtomicInfo::requiresMemSetZero(llvm::Type *type) const {
  // If the atomic type has size padding, we definitely need a memset.
  if (hasPadding()) return true;

  // Otherwise, do some simple heuristics to try to avoid it:
  switch (getEvaluationKind()) {
  // For scalars and complexes, check whether the store size of the
  // type uses the full size.
  case TEK_Scalar:
    return !isFullSizeType(CGF.CGM, type, AtomicSizeInBits);
  case TEK_Complex:
    return !isFullSizeType(CGF.CGM, type->getStructElementType(0),
                           AtomicSizeInBits / 2);

  // Padding in structs has an undefined bit pattern.  User beware.
  case TEK_Aggregate:
    return false;
  }
  llvm_unreachable("bad evaluation kind");
}

bool AtomicInfo::emitMemSetZeroIfNecessary() const {
  assert(LVal.isSimple());
  llvm::Value *addr = LVal.getAddress();
  if (!requiresMemSetZero(addr->getType()->getPointerElementType()))
    return false;

  CGF.Builder.CreateMemSet(addr, llvm::ConstantInt::get(CGF.Int8Ty, 0),
                           AtomicSizeInBits / 8,
                           LVal.getAlignment().getQuantity());
  return true;
}

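/// Emit an inline compare-and-exchange: load the expected and desired
/// values, issue the cmpxchg instruction with the given orderings, store
/// the observed value back through Val1 on failure, and write the success
/// flag to Dest.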
static void emitAtomicCmpXchg(CodeGenFunction &CGF, AtomicExpr *E, bool IsWeak,
                              llvm::Value *Dest, llvm::Value *Ptr,
                              llvm::Value *Val1, llvm::Value *Val2,
                              uint64_t Size, unsigned Align,
                              llvm::AtomicOrdering SuccessOrder,
                              llvm::AtomicOrdering FailureOrder) {
  // Load the expected and desired operands; the cmpxchg instruction
  // takes both by value.
  llvm::LoadInst *Expected = CGF.Builder.CreateLoad(Val1);
  Expected->setAlignment(Align);
  llvm::LoadInst *Desired = CGF.Builder.CreateLoad(Val2);
  Desired->setAlignment(Align);

  llvm::AtomicCmpXchgInst *Pair = CGF.Builder.CreateAtomicCmpXchg(
      Ptr, Expected, Desired, SuccessOrder, FailureOrder);
  Pair->setVolatile(E->isVolatile());
  Pair->setWeak(IsWeak);

  // Cmp holds the result of the compare-exchange operation: true on success,
  // false on failure.
  llvm::Value *Old = CGF.Builder.CreateExtractValue(Pair, 0);
  llvm::Value *Cmp = CGF.Builder.CreateExtractValue(Pair, 1);

  // This basic block is used to hold the store instruction if the operation
  // failed.
  llvm::BasicBlock *StoreExpectedBB =
      CGF.createBasicBlock("cmpxchg.store_expected", CGF.CurFn);

  // This basic block is the exit point of the operation; we should end up
  // here regardless of whether the operation succeeded.
  llvm::BasicBlock *ContinueBB =
      CGF.createBasicBlock("cmpxchg.continue", CGF.CurFn);

  // Update Expected if Expected isn't equal to Old, otherwise branch to the
  // exit point.
  CGF.Builder.CreateCondBr(Cmp, ContinueBB, StoreExpectedBB);

  CGF.Builder.SetInsertPoint(StoreExpectedBB);
  // Update the memory at Expected with Old's value.
  llvm::StoreInst *StoreExpected = CGF.Builder.CreateStore(Old, Val1);
  StoreExpected->setAlignment(Align);
  // Finally, branch to the exit point.
  CGF.Builder.CreateBr(ContinueBB);

  CGF.Builder.SetInsertPoint(ContinueBB);
  // Update the memory at Dest with Cmp's value.
  CGF.EmitStoreOfScalar(Cmp, CGF.MakeAddrLValue(Dest, E->getType()));
  return;
}

/// Given an ordering required on success, emit all possible cmpxchg
/// instructions to cope with the provided (but possibly only dynamically known)
/// FailureOrder.
static void emitAtomicCmpXchgFailureSet(CodeGenFunction &CGF, AtomicExpr *E,
                                        bool IsWeak, llvm::Value *Dest,
                                        llvm::Value *Ptr, llvm::Value *Val1,
                                        llvm::Value *Val2,
                                        llvm::Value *FailureOrderVal,
                                        uint64_t Size, unsigned Align,
                                        llvm::AtomicOrdering SuccessOrder) {
  llvm::AtomicOrdering FailureOrder;
  if (llvm::ConstantInt *FO = dyn_cast<llvm::ConstantInt>(FailureOrderVal)) {
    switch (FO->getSExtValue()) {
    default:
      FailureOrder = llvm::Monotonic;
      break;
    case AtomicExpr::AO_ABI_memory_order_consume:
    case AtomicExpr::AO_ABI_memory_order_acquire:
      FailureOrder = llvm::Acquire;
      break;
    case AtomicExpr::AO_ABI_memory_order_seq_cst:
      FailureOrder = llvm::SequentiallyConsistent;
      break;
    }
    if (FailureOrder >= SuccessOrder) {
      // Don't assert on undefined behaviour.
      FailureOrder =
          llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(SuccessOrder);
    }
    emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, Size, Align,
                      SuccessOrder, FailureOrder);
    return;
  }

  // Create all the relevant BB's
  llvm::BasicBlock *MonotonicBB = nullptr, *AcquireBB = nullptr,
                   *SeqCstBB = nullptr;
  MonotonicBB = CGF.createBasicBlock("monotonic_fail", CGF.CurFn);
  if (SuccessOrder != llvm::Monotonic && SuccessOrder != llvm::Release)
    AcquireBB = CGF.createBasicBlock("acquire_fail", CGF.CurFn);
  if (SuccessOrder == llvm::SequentiallyConsistent)
    SeqCstBB = CGF.createBasicBlock("seqcst_fail", CGF.CurFn);

  llvm::BasicBlock *ContBB = CGF.createBasicBlock("atomic.continue", CGF.CurFn);

  llvm::SwitchInst *SI = CGF.Builder.CreateSwitch(FailureOrderVal, MonotonicBB);

  // Emit all the different atomics

  // MonotonicBB is arbitrarily chosen as the default case; in practice, this
  // doesn't matter unless someone is crazy enough to use something that
  // doesn't fold to a constant for the ordering.
  CGF.Builder.SetInsertPoint(MonotonicBB);
  emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2,
                    Size, Align, SuccessOrder, llvm::Monotonic);
  CGF.Builder.CreateBr(ContBB);

  if (AcquireBB) {
    CGF.Builder.SetInsertPoint(AcquireBB);
    emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2,
                      Size, Align, SuccessOrder, llvm::Acquire);
    CGF.Builder.CreateBr(ContBB);
    SI->addCase(CGF.Builder.getInt32(AtomicExpr::AO_ABI_memory_order_consume),
                AcquireBB);
    SI->addCase(CGF.Builder.getInt32(AtomicExpr::AO_ABI_memory_order_acquire),
                AcquireBB);
  }
  if (SeqCstBB) {
    CGF.Builder.SetInsertPoint(SeqCstBB);
    emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2,
                      Size, Align, SuccessOrder, llvm::SequentiallyConsistent);
    CGF.Builder.CreateBr(ContBB);
    SI->addCase(CGF.Builder.getInt32(AtomicExpr::AO_ABI_memory_order_seq_cst),
                SeqCstBB);
  }

  CGF.Builder.SetInsertPoint(ContBB);
}

static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, llvm::Value *Dest,
                         llvm::Value *Ptr, llvm::Value *Val1, llvm::Value *Val2,
                         llvm::Value *IsWeak, llvm::Value *FailureOrder,
                         uint64_t Size, unsigned Align,
                         llvm::AtomicOrdering Order) {
  llvm::AtomicRMWInst::BinOp Op = llvm::AtomicRMWInst::Add;
  llvm::Instruction::BinaryOps PostOp = (llvm::Instruction::BinaryOps)0;

  switch (E->getOp()) {
  case AtomicExpr::AO__c11_atomic_init:
    llvm_unreachable("Already handled!");

  case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
    emitAtomicCmpXchgFailureSet(CGF, E, false, Dest, Ptr, Val1, Val2,
                                FailureOrder, Size, Align, Order);
    return;
  case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
    emitAtomicCmpXchgFailureSet(CGF, E, true, Dest, Ptr, Val1, Val2,
                                FailureOrder, Size, Align, Order);
    return;
  case AtomicExpr::AO__atomic_compare_exchange:
  case AtomicExpr::AO__atomic_compare_exchange_n: {
    if (llvm::ConstantInt *IsWeakC = dyn_cast<llvm::ConstantInt>(IsWeak)) {
      emitAtomicCmpXchgFailureSet(CGF, E, IsWeakC->getZExtValue(), Dest, Ptr,
                                  Val1, Val2, FailureOrder, Size, Align, Order);
    } else {
      // Create all the relevant BB's
      llvm::BasicBlock *StrongBB =
          CGF.createBasicBlock("cmpxchg.strong", CGF.CurFn);
      llvm::BasicBlock *WeakBB = CGF.createBasicBlock("cmpxchg.weak", CGF.CurFn);
      llvm::BasicBlock *ContBB =
          CGF.createBasicBlock("cmpxchg.continue", CGF.CurFn);

      llvm::SwitchInst *SI = CGF.Builder.CreateSwitch(IsWeak, WeakBB);
      SI->addCase(CGF.Builder.getInt1(false), StrongBB);

      CGF.Builder.SetInsertPoint(StrongBB);
      emitAtomicCmpXchgFailureSet(CGF, E, false, Dest, Ptr, Val1, Val2,
                                  FailureOrder, Size, Align, Order);
      CGF.Builder.CreateBr(ContBB);

      CGF.Builder.SetInsertPoint(WeakBB);
      emitAtomicCmpXchgFailureSet(CGF, E, true, Dest, Ptr, Val1, Val2,
                                  FailureOrder, Size, Align, Order);
      CGF.Builder.CreateBr(ContBB);

      CGF.Builder.SetInsertPoint(ContBB);
    }
    return;
  }
  case AtomicExpr::AO__c11_atomic_load:
  case AtomicExpr::AO__atomic_load_n:
  case AtomicExpr::AO__atomic_load: {
    llvm::LoadInst *Load = CGF.Builder.CreateLoad(Ptr);
    Load->setAtomic(Order);
    Load->setAlignment(Size);
    Load->setVolatile(E->isVolatile());
    llvm::StoreInst *StoreDest = CGF.Builder.CreateStore(Load, Dest);
    StoreDest->setAlignment(Align);
    return;
  }

  case AtomicExpr::AO__c11_atomic_store:
  case AtomicExpr::AO__atomic_store:
  case AtomicExpr::AO__atomic_store_n: {
    assert(!Dest && "Store does not return a value");
    llvm::LoadInst *LoadVal1 = CGF.Builder.CreateLoad(Val1);
    LoadVal1->setAlignment(Align);
    llvm::StoreInst *Store = CGF.Builder.CreateStore(LoadVal1, Ptr);
    Store->setAtomic(Order);
    Store->setAlignment(Size);
    Store->setVolatile(E->isVolatile());
    return;
  }

  case AtomicExpr::AO__c11_atomic_exchange:
  case AtomicExpr::AO__atomic_exchange_n:
  case AtomicExpr::AO__atomic_exchange:
    Op = llvm::AtomicRMWInst::Xchg;
    break;

  case AtomicExpr::AO__atomic_add_fetch:
    PostOp = llvm::Instruction::Add;
    // Fall through.
  case AtomicExpr::AO__c11_atomic_fetch_add:
  case AtomicExpr::AO__atomic_fetch_add:
    Op = llvm::AtomicRMWInst::Add;
    break;

  case AtomicExpr::AO__atomic_sub_fetch:
    PostOp = llvm::Instruction::Sub;
    // Fall through.
  case AtomicExpr::AO__c11_atomic_fetch_sub:
  case AtomicExpr::AO__atomic_fetch_sub:
    Op = llvm::AtomicRMWInst::Sub;
    break;

  case AtomicExpr::AO__atomic_and_fetch:
    PostOp = llvm::Instruction::And;
    // Fall through.
  case AtomicExpr::AO__c11_atomic_fetch_and:
  case AtomicExpr::AO__atomic_fetch_and:
    Op = llvm::AtomicRMWInst::And;
    break;

  case AtomicExpr::AO__atomic_or_fetch:
    PostOp = llvm::Instruction::Or;
    // Fall through.
  case AtomicExpr::AO__c11_atomic_fetch_or:
  case AtomicExpr::AO__atomic_fetch_or:
    Op = llvm::AtomicRMWInst::Or;
    break;

  case AtomicExpr::AO__atomic_xor_fetch:
    PostOp = llvm::Instruction::Xor;
    // Fall through.
  case AtomicExpr::AO__c11_atomic_fetch_xor:
  case AtomicExpr::AO__atomic_fetch_xor:
    Op = llvm::AtomicRMWInst::Xor;
    break;

  case AtomicExpr::AO__atomic_nand_fetch:
    PostOp = llvm::Instruction::And;
    // Fall through.
  case AtomicExpr::AO__atomic_fetch_nand:
    Op = llvm::AtomicRMWInst::Nand;
    break;
  }

  llvm::LoadInst *LoadVal1 = CGF.Builder.CreateLoad(Val1);
  LoadVal1->setAlignment(Align);
  llvm::AtomicRMWInst *RMWI =
      CGF.Builder.CreateAtomicRMW(Op, Ptr, LoadVal1, Order);
  RMWI->setVolatile(E->isVolatile());

  // For __atomic_*_fetch operations, perform the operation again to
  // determine the value which was written.
  llvm::Value *Result = RMWI;
  if (PostOp)
    Result = CGF.Builder.CreateBinOp(PostOp, RMWI, LoadVal1);
  if (E->getOp() == AtomicExpr::AO__atomic_nand_fetch)
    Result = CGF.Builder.CreateNot(Result);
  llvm::StoreInst *StoreDest = CGF.Builder.CreateStore(Result, Dest);
  StoreDest->setAlignment(Align);
}

// This function emits any expression (scalar, complex, or aggregate)
// into a temporary alloca.
static llvm::Value *
EmitValToTemp(CodeGenFunction &CGF, Expr *E) {
  llvm::Value *DeclPtr = CGF.CreateMemTemp(E->getType(), ".atomictmp");
  CGF.EmitAnyExprToMem(E, DeclPtr, E->getType().getQualifiers(),
                       /*Init*/ true);
  return DeclPtr;
}

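/// Append the value operand for an __atomic_* libcall: optimized (sized)
/// libcalls take the value directly, coerced to a same-sized integer,
/// while the generic variants take its address as a void*.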
static void
AddDirectArgument(CodeGenFunction &CGF, CallArgList &Args,
                  bool UseOptimizedLibcall, llvm::Value *Val, QualType ValTy,
                  SourceLocation Loc, CharUnits SizeInChars) {
  if (UseOptimizedLibcall) {
    // Load value and pass it to the function directly.
    unsigned Align = CGF.getContext().getTypeAlignInChars(ValTy).getQuantity();
    int64_t SizeInBits = CGF.getContext().toBits(SizeInChars);
    ValTy =
        CGF.getContext().getIntTypeForBitwidth(SizeInBits, /*Signed=*/false);
    llvm::Type *IPtrTy = llvm::IntegerType::get(CGF.getLLVMContext(),
                                                SizeInBits)->getPointerTo();
    Val = CGF.EmitLoadOfScalar(CGF.Builder.CreateBitCast(Val, IPtrTy), false,
                               Align, CGF.getContext().getPointerType(ValTy),
                               Loc);
    // Coerce the value into an appropriately sized integer type.
    Args.add(RValue::get(Val), ValTy);
  } else {
    // Non-optimized functions always take a reference.
    Args.add(RValue::get(CGF.EmitCastToVoidPtr(Val)),
             CGF.getContext().VoidPtrTy);
  }
}

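// For illustration only (not part of the original file): a source-level
// operation such as
//   _Atomic(int) x;  __c11_atomic_fetch_add(&x, 1, memory_order_seq_cst);
// is lowered below either to an inline 'atomicrmw add ... seq_cst'
// instruction or, when the target cannot handle the width inline, to a call
// to the runtime function __atomic_fetch_add_4.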
RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) {
  QualType AtomicTy = E->getPtr()->getType()->getPointeeType();
  QualType MemTy = AtomicTy;
  if (const AtomicType *AT = AtomicTy->getAs<AtomicType>())
    MemTy = AT->getValueType();
  CharUnits sizeChars = getContext().getTypeSizeInChars(AtomicTy);
  uint64_t Size = sizeChars.getQuantity();
  CharUnits alignChars = getContext().getTypeAlignInChars(AtomicTy);
  unsigned Align = alignChars.getQuantity();
  unsigned MaxInlineWidthInBits =
    getTarget().getMaxAtomicInlineWidth();
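  // A library call is required when the size and alignment disagree or when
  // the operation is wider than the target can handle inline.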
  bool UseLibcall = (Size != Align ||
                     getContext().toBits(sizeChars) > MaxInlineWidthInBits);

  llvm::Value *IsWeak = nullptr, *OrderFail = nullptr, *Val1 = nullptr,
              *Val2 = nullptr;
  llvm::Value *Ptr = EmitScalarExpr(E->getPtr());

  if (E->getOp() == AtomicExpr::AO__c11_atomic_init) {
    assert(!Dest && "Init does not return a value");
    LValue lvalue = LValue::MakeAddr(Ptr, AtomicTy, alignChars, getContext());
    EmitAtomicInit(E->getVal1(), lvalue);
    return RValue::get(nullptr);
  }

  llvm::Value *Order = EmitScalarExpr(E->getOrder());

  switch (E->getOp()) {
  case AtomicExpr::AO__c11_atomic_init:
    llvm_unreachable("Already handled!");

  case AtomicExpr::AO__c11_atomic_load:
  case AtomicExpr::AO__atomic_load_n:
    break;

  case AtomicExpr::AO__atomic_load:
    Dest = EmitScalarExpr(E->getVal1());
    break;

  case AtomicExpr::AO__atomic_store:
    Val1 = EmitScalarExpr(E->getVal1());
    break;

  case AtomicExpr::AO__atomic_exchange:
    Val1 = EmitScalarExpr(E->getVal1());
    Dest = EmitScalarExpr(E->getVal2());
    break;

  case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
  case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
  case AtomicExpr::AO__atomic_compare_exchange_n:
  case AtomicExpr::AO__atomic_compare_exchange:
    Val1 = EmitScalarExpr(E->getVal1());
    if (E->getOp() == AtomicExpr::AO__atomic_compare_exchange)
      Val2 = EmitScalarExpr(E->getVal2());
    else
      Val2 = EmitValToTemp(*this, E->getVal2());
    OrderFail = EmitScalarExpr(E->getOrderFail());
    if (E->getNumSubExprs() == 6)
      IsWeak = EmitScalarExpr(E->getWeak());
    break;

  case AtomicExpr::AO__c11_atomic_fetch_add:
  case AtomicExpr::AO__c11_atomic_fetch_sub:
    if (MemTy->isPointerType()) {
      // For pointer arithmetic, we're required to do a bit of math:
      // adding 1 to an int* is not the same as adding 1 to a uintptr_t.
      // ... but only for the C11 builtins. The GNU builtins expect the
      // user to multiply by sizeof(T).
      QualType Val1Ty = E->getVal1()->getType();
      llvm::Value *Val1Scalar = EmitScalarExpr(E->getVal1());
      CharUnits PointeeIncAmt =
          getContext().getTypeSizeInChars(MemTy->getPointeeType());
      Val1Scalar = Builder.CreateMul(Val1Scalar, CGM.getSize(PointeeIncAmt));
      Val1 = CreateMemTemp(Val1Ty, ".atomictmp");
      EmitStoreOfScalar(Val1Scalar, MakeAddrLValue(Val1, Val1Ty));
      break;
    }
    // Fall through.
  case AtomicExpr::AO__atomic_fetch_add:
  case AtomicExpr::AO__atomic_fetch_sub:
  case AtomicExpr::AO__atomic_add_fetch:
  case AtomicExpr::AO__atomic_sub_fetch:
  case AtomicExpr::AO__c11_atomic_store:
  case AtomicExpr::AO__c11_atomic_exchange:
  case AtomicExpr::AO__atomic_store_n:
  case AtomicExpr::AO__atomic_exchange_n:
  case AtomicExpr::AO__c11_atomic_fetch_and:
  case AtomicExpr::AO__c11_atomic_fetch_or:
  case AtomicExpr::AO__c11_atomic_fetch_xor:
  case AtomicExpr::AO__atomic_fetch_and:
  case AtomicExpr::AO__atomic_fetch_or:
  case AtomicExpr::AO__atomic_fetch_xor:
  case AtomicExpr::AO__atomic_fetch_nand:
  case AtomicExpr::AO__atomic_and_fetch:
  case AtomicExpr::AO__atomic_or_fetch:
  case AtomicExpr::AO__atomic_xor_fetch:
  case AtomicExpr::AO__atomic_nand_fetch:
    Val1 = EmitValToTemp(*this, E->getVal1());
    break;
  }

  QualType RValTy = E->getType().getUnqualifiedType();

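  // Lazily create a destination temporary for operations that produce a
  // value but were not handed a place to put it.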
  auto GetDest = [&] {
    if (!RValTy->isVoidType() && !Dest) {
      Dest = CreateMemTemp(RValTy, ".atomicdst");
    }
    return Dest;
  };

  // Use a library call.  See: http://gcc.gnu.org/wiki/Atomic/GCCMM/LIbrary .
  if (UseLibcall) {
    bool UseOptimizedLibcall = false;
    switch (E->getOp()) {
    case AtomicExpr::AO__c11_atomic_fetch_add:
    case AtomicExpr::AO__atomic_fetch_add:
    case AtomicExpr::AO__c11_atomic_fetch_and:
    case AtomicExpr::AO__atomic_fetch_and:
    case AtomicExpr::AO__c11_atomic_fetch_or:
    case AtomicExpr::AO__atomic_fetch_or:
    case AtomicExpr::AO__c11_atomic_fetch_sub:
    case AtomicExpr::AO__atomic_fetch_sub:
    case AtomicExpr::AO__c11_atomic_fetch_xor:
    case AtomicExpr::AO__atomic_fetch_xor:
      // For these, only library calls for certain sizes exist.
      UseOptimizedLibcall = true;
      break;
    default:
      // Only use optimized library calls for sizes for which they exist.
      if (Size == 1 || Size == 2 || Size == 4 || Size == 8)
        UseOptimizedLibcall = true;
      break;
    }

    CallArgList Args;
    if (!UseOptimizedLibcall) {
      // For non-optimized library calls, the size is the first parameter.
      Args.add(RValue::get(llvm::ConstantInt::get(SizeTy, Size)),
               getContext().getSizeType());
    }
    // The atomic address is the first or second parameter.
    Args.add(RValue::get(EmitCastToVoidPtr(Ptr)), getContext().VoidPtrTy);

    std::string LibCallName;
    QualType LoweredMemTy =
        MemTy->isPointerType() ? getContext().getIntPtrType() : MemTy;
    QualType RetTy;
    bool HaveRetTy = false;
    switch (E->getOp()) {
    // There is only one libcall for compare and exchange, because there is no
    // optimisation benefit possible from a libcall version of a weak compare
    // and exchange.
    // bool __atomic_compare_exchange(size_t size, void *mem, void *expected,
    //                                void *desired, int success, int failure)
    // bool __atomic_compare_exchange_N(T *mem, T *expected, T desired,
    //                                  int success, int failure)
    case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
    case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
    case AtomicExpr::AO__atomic_compare_exchange:
    case AtomicExpr::AO__atomic_compare_exchange_n:
      LibCallName = "__atomic_compare_exchange";
      RetTy = getContext().BoolTy;
      HaveRetTy = true;
      Args.add(RValue::get(EmitCastToVoidPtr(Val1)), getContext().VoidPtrTy);
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val2, MemTy,
                        E->getExprLoc(), sizeChars);
      Args.add(RValue::get(Order), getContext().IntTy);
      Order = OrderFail;
      break;
    // void __atomic_exchange(size_t size, void *mem, void *val, void *return,
    //                        int order)
    // T __atomic_exchange_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_exchange:
    case AtomicExpr::AO__atomic_exchange_n:
    case AtomicExpr::AO__atomic_exchange:
      LibCallName = "__atomic_exchange";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
                        E->getExprLoc(), sizeChars);
      break;
    // void __atomic_store(size_t size, void *mem, void *val, int order)
    // void __atomic_store_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_store:
    case AtomicExpr::AO__atomic_store:
    case AtomicExpr::AO__atomic_store_n:
      LibCallName = "__atomic_store";
      RetTy = getContext().VoidTy;
      HaveRetTy = true;
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
                        E->getExprLoc(), sizeChars);
      break;
    // void __atomic_load(size_t size, void *mem, void *return, int order)
    // T __atomic_load_N(T *mem, int order)
    case AtomicExpr::AO__c11_atomic_load:
    case AtomicExpr::AO__atomic_load:
    case AtomicExpr::AO__atomic_load_n:
      LibCallName = "__atomic_load";
      break;
    // T __atomic_fetch_add_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_fetch_add:
    case AtomicExpr::AO__atomic_fetch_add:
      LibCallName = "__atomic_fetch_add";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, LoweredMemTy,
                        E->getExprLoc(), sizeChars);
      break;
    // T __atomic_fetch_and_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_fetch_and:
    case AtomicExpr::AO__atomic_fetch_and:
      LibCallName = "__atomic_fetch_and";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
                        E->getExprLoc(), sizeChars);
      break;
    // T __atomic_fetch_or_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_fetch_or:
    case AtomicExpr::AO__atomic_fetch_or:
      LibCallName = "__atomic_fetch_or";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
                        E->getExprLoc(), sizeChars);
      break;
    // T __atomic_fetch_sub_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_fetch_sub:
    case AtomicExpr::AO__atomic_fetch_sub:
      LibCallName = "__atomic_fetch_sub";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, LoweredMemTy,
                        E->getExprLoc(), sizeChars);
      break;
    // T __atomic_fetch_xor_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_fetch_xor:
    case AtomicExpr::AO__atomic_fetch_xor:
      LibCallName = "__atomic_fetch_xor";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
                        E->getExprLoc(), sizeChars);
      break;
    default: return EmitUnsupportedRValue(E, "atomic library call");
    }

    // Optimized functions have the size in their name.
    if (UseOptimizedLibcall)
      LibCallName += "_" + llvm::utostr(Size);
    // By default, assume we return a value of the atomic type.
    if (!HaveRetTy) {
      if (UseOptimizedLibcall) {
        // Value is returned directly.
        // The function returns an appropriately sized integer type.
        RetTy = getContext().getIntTypeForBitwidth(
            getContext().toBits(sizeChars), /*Signed=*/false);
      } else {
        // Value is returned through parameter before the order.
        RetTy = getContext().VoidTy;
        Args.add(RValue::get(EmitCastToVoidPtr(Dest)), getContext().VoidPtrTy);
      }
    }
    // The order is always the last parameter.
    Args.add(RValue::get(Order),
             getContext().IntTy);

    RValue Res = emitAtomicLibcall(*this, LibCallName, RetTy, Args);
    // The value is returned directly from the libcall.
    if (HaveRetTy && !RetTy->isVoidType())
      return Res;
    // The value is returned via an explicit out param.
    if (RetTy->isVoidType())
      return RValue::get(nullptr);
    // The value is returned directly for optimized libcalls but the caller
    // expects an out-param.
    if (UseOptimizedLibcall) {
      llvm::Value *ResVal = Res.getScalarVal();
      llvm::StoreInst *StoreDest = Builder.CreateStore(
          ResVal,
          Builder.CreateBitCast(GetDest(), ResVal->getType()->getPointerTo()));
      StoreDest->setAlignment(Align);
    }
    return convertTempToRValue(Dest, RValTy, E->getExprLoc());
  }

  bool IsStore = E->getOp() == AtomicExpr::AO__c11_atomic_store ||
                 E->getOp() == AtomicExpr::AO__atomic_store ||
                 E->getOp() == AtomicExpr::AO__atomic_store_n;
  bool IsLoad = E->getOp() == AtomicExpr::AO__c11_atomic_load ||
                E->getOp() == AtomicExpr::AO__atomic_load ||
                E->getOp() == AtomicExpr::AO__atomic_load_n;

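  // Inline path: reinterpret the pointers involved as pointers to an
  // integer of the atomic's width so a single instruction can operate on it.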
  llvm::Type *ITy =
      llvm::IntegerType::get(getLLVMContext(), Size * 8);
  llvm::Value *OrigDest = GetDest();
  Ptr = Builder.CreateBitCast(
      Ptr, ITy->getPointerTo(Ptr->getType()->getPointerAddressSpace()));
  if (Val1) Val1 = Builder.CreateBitCast(Val1, ITy->getPointerTo());
  if (Val2) Val2 = Builder.CreateBitCast(Val2, ITy->getPointerTo());
  if (Dest && !E->isCmpXChg())
    Dest = Builder.CreateBitCast(Dest, ITy->getPointerTo());

  if (isa<llvm::ConstantInt>(Order)) {
    int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
    switch (ord) {
    case AtomicExpr::AO_ABI_memory_order_relaxed:
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
                   Size, Align, llvm::Monotonic);
      break;
    case AtomicExpr::AO_ABI_memory_order_consume:
    case AtomicExpr::AO_ABI_memory_order_acquire:
      if (IsStore)
        break; // Avoid crashing on code with undefined behavior
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
                   Size, Align, llvm::Acquire);
      break;
    case AtomicExpr::AO_ABI_memory_order_release:
      if (IsLoad)
        break; // Avoid crashing on code with undefined behavior
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
                   Size, Align, llvm::Release);
      break;
    case AtomicExpr::AO_ABI_memory_order_acq_rel:
      if (IsLoad || IsStore)
        break; // Avoid crashing on code with undefined behavior
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
                   Size, Align, llvm::AcquireRelease);
      break;
    case AtomicExpr::AO_ABI_memory_order_seq_cst:
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
                   Size, Align, llvm::SequentiallyConsistent);
      break;
    default: // invalid order
      // We should not ever get here normally, but it's hard to
      // enforce that in general.
      break;
    }
    if (RValTy->isVoidType())
      return RValue::get(nullptr);
    return convertTempToRValue(OrigDest, RValTy, E->getExprLoc());
  }

  // Long case, when Order isn't obviously constant.

  // Create all the relevant BB's
  llvm::BasicBlock *MonotonicBB = nullptr, *AcquireBB = nullptr,
                   *ReleaseBB = nullptr, *AcqRelBB = nullptr,
                   *SeqCstBB = nullptr;
  MonotonicBB = createBasicBlock("monotonic", CurFn);
  if (!IsStore)
    AcquireBB = createBasicBlock("acquire", CurFn);
  if (!IsLoad)
    ReleaseBB = createBasicBlock("release", CurFn);
  if (!IsLoad && !IsStore)
    AcqRelBB = createBasicBlock("acqrel", CurFn);
  SeqCstBB = createBasicBlock("seqcst", CurFn);
  llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);

  // Create the switch for the split
  // MonotonicBB is arbitrarily chosen as the default case; in practice, this
  // doesn't matter unless someone is crazy enough to use something that
  // doesn't fold to a constant for the ordering.
  Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
  llvm::SwitchInst *SI = Builder.CreateSwitch(Order, MonotonicBB);

  // Emit all the different atomics
  Builder.SetInsertPoint(MonotonicBB);
  EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
               Size, Align, llvm::Monotonic);
  Builder.CreateBr(ContBB);
  if (!IsStore) {
    Builder.SetInsertPoint(AcquireBB);
    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
                 Size, Align, llvm::Acquire);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32(AtomicExpr::AO_ABI_memory_order_consume),
                AcquireBB);
    SI->addCase(Builder.getInt32(AtomicExpr::AO_ABI_memory_order_acquire),
                AcquireBB);
  }
  if (!IsLoad) {
    Builder.SetInsertPoint(ReleaseBB);
    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
                 Size, Align, llvm::Release);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32(AtomicExpr::AO_ABI_memory_order_release),
                ReleaseBB);
  }
  if (!IsLoad && !IsStore) {
    Builder.SetInsertPoint(AcqRelBB);
    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
                 Size, Align, llvm::AcquireRelease);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32(AtomicExpr::AO_ABI_memory_order_acq_rel),
                AcqRelBB);
  }
  Builder.SetInsertPoint(SeqCstBB);
  EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
               Size, Align, llvm::SequentiallyConsistent);
  Builder.CreateBr(ContBB);
  SI->addCase(Builder.getInt32(AtomicExpr::AO_ABI_memory_order_seq_cst),
              SeqCstBB);

  // Cleanup and return
  Builder.SetInsertPoint(ContBB);
  if (RValTy->isVoidType())
    return RValue::get(nullptr);
  return convertTempToRValue(OrigDest, RValTy, E->getExprLoc());
}

llvm::Value *AtomicInfo::emitCastToAtomicIntPointer(llvm::Value *addr) const {
  unsigned addrspace =
    cast<llvm::PointerType>(addr->getType())->getAddressSpace();
  llvm::IntegerType *ty =
    llvm::IntegerType::get(CGF.getLLVMContext(), AtomicSizeInBits);
  return CGF.Builder.CreateBitCast(addr, ty->getPointerTo(addrspace));
}

RValue AtomicInfo::convertTempToRValue(llvm::Value *addr,
                                       AggValueSlot resultSlot,
                                       SourceLocation loc) const {
  if (LVal.isSimple()) {
    if (EvaluationKind == TEK_Aggregate)
      return resultSlot.asRValue();

    // Drill into the padding structure if we have one.
    if (hasPadding())
      addr = CGF.Builder.CreateStructGEP(addr, 0);

    // Otherwise, just convert the temporary to an r-value using the
    // normal conversion routine.
    return CGF.convertTempToRValue(addr, getValueType(), loc);
  } else if (LVal.isBitField())
    return CGF.EmitLoadOfBitfieldLValue(LValue::MakeBitfield(
        addr, LVal.getBitFieldInfo(), LVal.getType(), LVal.getAlignment()));
  else if (LVal.isVectorElt())
    return CGF.EmitLoadOfLValue(LValue::MakeVectorElt(addr, LVal.getVectorIdx(),
                                                      LVal.getType(),
                                                      LVal.getAlignment()),
                                loc);
  assert(LVal.isExtVectorElt());
  return CGF.EmitLoadOfExtVectorElementLValue(LValue::MakeExtVectorElt(
      addr, LVal.getExtVectorElts(), LVal.getType(), LVal.getAlignment()));
}

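/// Turn a loaded atomic integer back into a value of the original type,
/// either by a direct cast or by bouncing it through a memory temporary.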
RValue AtomicInfo::convertIntToValue(llvm::Value *IntVal,
                                     AggValueSlot ResultSlot,
                                     SourceLocation Loc) const {
  assert(LVal.isSimple());
  // Try to avoid going through memory in the easy cases.
  assert(IntVal->getType()->isIntegerTy() && "Expected integer value");
  if (getEvaluationKind() == TEK_Scalar && !hasPadding()) {
    auto *ValTy = CGF.ConvertTypeForMem(ValueTy);
    if (ValTy->isIntegerTy()) {
      assert(IntVal->getType() == ValTy && "Different integer types.");
      return RValue::get(IntVal);
    } else if (ValTy->isPointerTy())
      return RValue::get(CGF.Builder.CreateIntToPtr(IntVal, ValTy));
    else if (llvm::CastInst::isBitCastable(IntVal->getType(), ValTy))
      return RValue::get(CGF.Builder.CreateBitCast(IntVal, ValTy));
  }

  // Create a temporary.  This needs to be big enough to hold the
  // atomic integer.
  llvm::Value *Temp;
  bool TempIsVolatile = false;
  CharUnits TempAlignment;
  if (getEvaluationKind() == TEK_Aggregate) {
    assert(!ResultSlot.isIgnored());
    Temp = ResultSlot.getAddr();
    TempAlignment = getValueAlignment();
    TempIsVolatile = ResultSlot.isVolatile();
  } else {
    Temp = CGF.CreateMemTemp(getAtomicType(), "atomic-temp");
    TempAlignment = getAtomicAlignment();
  }

  // Slam the integer into the temporary.
  llvm::Value *CastTemp = emitCastToAtomicIntPointer(Temp);
  CGF.Builder.CreateAlignedStore(IntVal, CastTemp, TempAlignment.getQuantity())
      ->setVolatile(TempIsVolatile);

  return convertTempToRValue(Temp, ResultSlot, Loc);
}

/// Emit a load from an l-value of atomic type.  Note that the r-value
/// we produce is an r-value of the atomic *value* type.
RValue CodeGenFunction::EmitAtomicLoad(LValue src, SourceLocation loc,
                                       AggValueSlot resultSlot) {
  AtomicInfo atomics(*this, src);
  LValue LVal = atomics.getAtomicLValue();
  llvm::Value *SrcAddr = nullptr;
  llvm::AllocaInst *NonSimpleTempAlloca = nullptr;
  if (LVal.isSimple())
    SrcAddr = LVal.getAddress();
  else {
    if (LVal.isBitField())
      SrcAddr = LVal.getBitFieldAddr();
    else if (LVal.isVectorElt())
      SrcAddr = LVal.getVectorAddr();
    else {
      assert(LVal.isExtVectorElt());
      SrcAddr = LVal.getExtVectorAddr();
    }
    NonSimpleTempAlloca = CreateTempAlloca(
        SrcAddr->getType()->getPointerElementType(), "atomic-load-temp");
    NonSimpleTempAlloca->setAlignment(getContext().toBits(src.getAlignment()));
  }

  // Check whether we should use a library call.
  if (atomics.shouldUseLibcall()) {
    llvm::Value *tempAddr;
    if (LVal.isSimple()) {
      if (!resultSlot.isIgnored()) {
        assert(atomics.getEvaluationKind() == TEK_Aggregate);
        tempAddr = resultSlot.getAddr();
      } else
        tempAddr = CreateMemTemp(atomics.getAtomicType(), "atomic-load-temp");
    } else
      tempAddr = NonSimpleTempAlloca;

    // void __atomic_load(size_t size, void *mem, void *return, int order);
    CallArgList args;
    args.add(RValue::get(atomics.getAtomicSizeValue()),
             getContext().getSizeType());
    args.add(RValue::get(EmitCastToVoidPtr(SrcAddr)), getContext().VoidPtrTy);
    args.add(RValue::get(EmitCastToVoidPtr(tempAddr)), getContext().VoidPtrTy);
    args.add(RValue::get(llvm::ConstantInt::get(
                 IntTy, AtomicExpr::AO_ABI_memory_order_seq_cst)),
             getContext().IntTy);
    emitAtomicLibcall(*this, "__atomic_load", getContext().VoidTy, args);

    // Produce the r-value.
    return atomics.convertTempToRValue(tempAddr, resultSlot, loc);
  }

  // Okay, we're doing this natively.
  llvm::Value *addr = atomics.emitCastToAtomicIntPointer(SrcAddr);
  llvm::LoadInst *load = Builder.CreateLoad(addr, "atomic-load");
  load->setAtomic(llvm::SequentiallyConsistent);

  // Other decoration.
  load->setAlignment(src.getAlignment().getQuantity());
  if (src.isVolatileQualified())
    load->setVolatile(true);
  if (src.getTBAAInfo())
    CGM.DecorateInstruction(load, src.getTBAAInfo());

  // If we're ignoring an aggregate return, don't do anything.
  if (atomics.getEvaluationKind() == TEK_Aggregate && resultSlot.isIgnored())
    return RValue::getAggregate(nullptr, false);

  // Okay, turn that back into the original value type.
  if (src.isSimple())
    return atomics.convertIntToValue(load, resultSlot, loc);

  auto *IntAddr = atomics.emitCastToAtomicIntPointer(NonSimpleTempAlloca);
  Builder.CreateAlignedStore(load, IntAddr, src.getAlignment().getQuantity());
  return atomics.convertTempToRValue(NonSimpleTempAlloca, resultSlot, loc);
}



/// Copy an r-value into memory as part of storing to an atomic type.
/// This needs to create a bit-pattern suitable for atomic operations.
void AtomicInfo::emitCopyIntoMemory(RValue rvalue) const {
  assert(LVal.isSimple());
  // If we have an r-value, the rvalue should be of the atomic type,
  // which means that the caller is responsible for having zeroed
  // any padding.  Just do an aggregate copy of that type.
  if (rvalue.isAggregate()) {
    CGF.EmitAggregateCopy(LVal.getAddress(),
                          rvalue.getAggregateAddr(),
                          getAtomicType(),
                          (rvalue.isVolatileQualified()
                           || LVal.isVolatileQualified()),
                          LVal.getAlignment());
    return;
  }

  // Okay, otherwise we're copying stuff.

  // Zero out the buffer if necessary.
  emitMemSetZeroIfNecessary();

  // Drill past the padding if present.
  LValue TempLVal = projectValue();

  // Okay, store the rvalue in.
  if (rvalue.isScalar()) {
    CGF.EmitStoreOfScalar(rvalue.getScalarVal(), TempLVal, /*init*/ true);
  } else {
    CGF.EmitStoreOfComplex(rvalue.getComplexVal(), TempLVal, /*init*/ true);
  }
}


/// Materialize an r-value into memory for the purposes of storing it
/// to an atomic type.
llvm::Value *AtomicInfo::materializeRValue(RValue rvalue) const {
  // Aggregate r-values are already in memory, and EmitAtomicStore
  // requires them to be values of the atomic type.
  if (rvalue.isAggregate())
    return rvalue.getAggregateAddr();

  // Otherwise, make a temporary and materialize into it.
  llvm::Value *temp = CGF.CreateMemTemp(getAtomicType(), "atomic-store-temp");
  LValue tempLV =
      CGF.MakeAddrLValue(temp, getAtomicType(), getAtomicAlignment());
  AtomicInfo Atomics(CGF, tempLV);
  Atomics.emitCopyIntoMemory(rvalue);
  return temp;
}

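/// Pack an r-value into an integer of the atomic's width, going through a
/// memory temporary when a direct cast is not possible.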
Alexey Bataev452d8e12014-12-15 05:25:25 +00001138llvm::Value *AtomicInfo::convertRValueToInt(RValue RVal) const {
1139 // If we've got a scalar value of the right size, try to avoid going
1140 // through memory.
1141 if (RVal.isScalar() && !hasPadding()) {
1142 llvm::Value *Value = RVal.getScalarVal();
1143 if (isa<llvm::IntegerType>(Value->getType()))
1144 return Value;
1145 else {
1146 llvm::IntegerType *InputIntTy =
1147 llvm::IntegerType::get(CGF.getLLVMContext(), getValueSizeInBits());
1148 if (isa<llvm::PointerType>(Value->getType()))
1149 return CGF.Builder.CreatePtrToInt(Value, InputIntTy);
1150 else if (llvm::BitCastInst::isBitCastable(Value->getType(), InputIntTy))
1151 return CGF.Builder.CreateBitCast(Value, InputIntTy);
1152 }
1153 }
1154 // Otherwise, we need to go through memory.
1155 // Put the r-value in memory.
1156 llvm::Value *Addr = materializeRValue(RVal);
1157
1158 // Cast the temporary to the atomic int type and pull a value out.
1159 Addr = emitCastToAtomicIntPointer(Addr);
1160 return CGF.Builder.CreateAlignedLoad(Addr,
1161 getAtomicAlignment().getQuantity());
1162}
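
// For instance (illustrative): a 32-bit float becomes a bitcast to i32 and
// a pointer becomes a ptrtoint, while a padded value takes the slow path:
// it is spilled to a temporary of the full atomic type and reloaded as a
// single integer covering the whole buffer, padding included.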

/// Emit a store to an l-value of atomic type.
///
/// Note that the r-value is expected to be an r-value *of the atomic
/// type*; this means that for aggregate r-values, it should include
/// storage for any padding that was necessary.
void CodeGenFunction::EmitAtomicStore(RValue rvalue, LValue dest, bool isInit) {
  // If this is an aggregate r-value, it should agree in type except
  // maybe for address-space qualification.
  assert(!rvalue.isAggregate() ||
         rvalue.getAggregateAddr()->getType()->getPointerElementType()
           == dest.getAddress()->getType()->getPointerElementType());

  AtomicInfo atomics(*this, dest);

  // If this is an initialization, just put the value there normally.
  if (isInit) {
    atomics.emitCopyIntoMemory(rvalue);
    return;
  }

  // Check whether we should use a library call.
  if (atomics.shouldUseLibcall()) {
    // Produce a source address.
    llvm::Value *srcAddr = atomics.materializeRValue(rvalue);

    // void __atomic_store(size_t size, void *mem, void *val, int order)
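    // Illustratively (assuming a 16-byte atomic object), the emitted call
    // looks roughly like
    //   call void @__atomic_store(i64 16, i8* %obj, i8* %tmp, i32 5)
    // where 5 is the C11 ABI encoding of memory_order_seq_cst.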
    CallArgList args;
    args.add(RValue::get(atomics.getAtomicSizeValue()),
             getContext().getSizeType());
    args.add(RValue::get(EmitCastToVoidPtr(dest.getAddress())),
             getContext().VoidPtrTy);
    args.add(RValue::get(EmitCastToVoidPtr(srcAddr)),
             getContext().VoidPtrTy);
    args.add(RValue::get(llvm::ConstantInt::get(
                 IntTy, AtomicExpr::AO_ABI_memory_order_seq_cst)),
             getContext().IntTy);
    emitAtomicLibcall(*this, "__atomic_store", getContext().VoidTy, args);
    return;
  }

  // Okay, we're doing this natively.
  llvm::Value *intValue = atomics.convertRValueToInt(rvalue);

  // Do the atomic store.
  llvm::Value *addr = atomics.emitCastToAtomicIntPointer(dest.getAddress());
  llvm::StoreInst *store = Builder.CreateStore(intValue, addr);

  // Initializations don't need to be atomic: the object is not yet
  // visible to any other thread.
  if (!isInit) store->setAtomic(llvm::SequentiallyConsistent);

  // Other decoration.
  store->setAlignment(dest.getAlignment().getQuantity());
  if (dest.isVolatileQualified())
    store->setVolatile(true);
  if (dest.getTBAAInfo())
    CGM.DecorateInstruction(store, dest.getTBAAInfo());
}
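
// Usage sketch (illustrative, not from this file): a plain assignment
//   _Atomic(int) x;  ...  x = 42;
// reaches the native path above and boils down to
//   store atomic i32 42, i32* %x seq_cst, align 4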

/// Emit a compare-and-exchange operation for an atomic type.
///
/// Returns a pair of the value previously held by the object and a
/// boolean r-value that is true if the exchange succeeded.
std::pair<RValue, RValue> CodeGenFunction::EmitAtomicCompareExchange(
    LValue Obj, RValue Expected, RValue Desired, SourceLocation Loc,
    llvm::AtomicOrdering Success, llvm::AtomicOrdering Failure, bool IsWeak,
    AggValueSlot Slot) {
  // If this is an aggregate r-value, it should agree in type except
  // maybe for address-space qualification.
  assert(!Expected.isAggregate() ||
         Expected.getAggregateAddr()->getType()->getPointerElementType() ==
             Obj.getAddress()->getType()->getPointerElementType());
  assert(!Desired.isAggregate() ||
         Desired.getAggregateAddr()->getType()->getPointerElementType() ==
             Obj.getAddress()->getType()->getPointerElementType());
  AtomicInfo Atomics(*this, Obj);

  if (Failure >= Success)
    // Don't assert on undefined behavior: quietly weaken an over-strong
    // failure ordering to the strongest one this success ordering allows.
    Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(Success);
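  // E.g. (illustrative): Success = AcquireRelease with an invalid
  // Failure = SequentiallyConsistent is weakened to Failure = Acquire.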

  auto Alignment = Atomics.getValueAlignment();
  // Check whether we should use a library call.
  if (Atomics.shouldUseLibcall()) {
    // Materialize the expected and desired values so the runtime can be
    // handed their addresses.
    auto *ExpectedAddr = Atomics.materializeRValue(Expected);
    auto *DesiredAddr = Atomics.materializeRValue(Desired);
    // bool __atomic_compare_exchange(size_t size, void *obj, void *expected,
    //                                void *desired, int success, int failure);
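    // Illustratively (assuming a 16-byte object and seq_cst orderings):
    //   %ok = call zeroext i1 @__atomic_compare_exchange(i64 16, i8* %obj,
    //             i8* %expected, i8* %desired, i32 5, i32 5)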
    CallArgList Args;
    Args.add(RValue::get(Atomics.getAtomicSizeValue()),
             getContext().getSizeType());
    Args.add(RValue::get(EmitCastToVoidPtr(Obj.getAddress())),
             getContext().VoidPtrTy);
    Args.add(RValue::get(EmitCastToVoidPtr(ExpectedAddr)),
             getContext().VoidPtrTy);
    Args.add(RValue::get(EmitCastToVoidPtr(DesiredAddr)),
             getContext().VoidPtrTy);
    // The runtime expects C11 memory_order values, but Success and Failure
    // carry LLVM's AtomicOrdering encoding (seq_cst is 7 there, 5 in C11),
    // so translate them before passing them along.
    auto ToABIOrdering = [](llvm::AtomicOrdering AO) -> uint64_t {
      switch (AO) {
      case llvm::Acquire:        return AtomicExpr::AO_ABI_memory_order_acquire;
      case llvm::Release:        return AtomicExpr::AO_ABI_memory_order_release;
      case llvm::AcquireRelease: return AtomicExpr::AO_ABI_memory_order_acq_rel;
      case llvm::SequentiallyConsistent:
                                 return AtomicExpr::AO_ABI_memory_order_seq_cst;
      default:                   return AtomicExpr::AO_ABI_memory_order_relaxed;
      }
    };
    Args.add(RValue::get(llvm::ConstantInt::get(IntTy, ToABIOrdering(Success))),
             getContext().IntTy);
    Args.add(RValue::get(llvm::ConstantInt::get(IntTy, ToABIOrdering(Failure))),
             getContext().IntTy);
    auto SuccessFailureRVal = emitAtomicLibcall(
        *this, "__atomic_compare_exchange", getContext().BoolTy, Args);
    auto *PreviousVal =
        Builder.CreateAlignedLoad(ExpectedAddr, Alignment.getQuantity());
    return std::make_pair(RValue::get(PreviousVal), SuccessFailureRVal);
  }

  // Convert the expected and desired values to integers of the atomic
  // width; convertRValueToInt avoids a memory round-trip when it can.
  auto *ExpectedIntVal = Atomics.convertRValueToInt(Expected);
  auto *DesiredIntVal = Atomics.convertRValueToInt(Desired);

  // Emit the compare-and-exchange instruction.
  auto *Addr = Atomics.emitCastToAtomicIntPointer(Obj.getAddress());
  auto *Inst = Builder.CreateAtomicCmpXchg(Addr, ExpectedIntVal, DesiredIntVal,
                                           Success, Failure);
  // Other decoration.
  Inst->setVolatile(Obj.isVolatileQualified());
  Inst->setWeak(IsWeak);

  // Okay, turn that back into the original value type.
  auto *PreviousVal = Builder.CreateExtractValue(Inst, /*Idxs=*/0);
  auto *SuccessFailureVal = Builder.CreateExtractValue(Inst, /*Idxs=*/1);
  return std::make_pair(Atomics.convertIntToValue(PreviousVal, Slot, Loc),
                        RValue::get(SuccessFailureVal));
}
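
// Sketch of the native path's output (illustrative, not from this file):
//   %pair = cmpxchg i32* %obj, i32 %expected, i32 %desired seq_cst seq_cst
//   %old  = extractvalue { i32, i1 } %pair, 0
//   %ok   = extractvalue { i32, i1 } %pair, 1
// With IsWeak set, the cmpxchg carries the 'weak' marker and may fail
// spuriously.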

void CodeGenFunction::EmitAtomicInit(Expr *init, LValue dest) {
  AtomicInfo atomics(*this, dest);

  switch (atomics.getEvaluationKind()) {
  case TEK_Scalar: {
    llvm::Value *value = EmitScalarExpr(init);
    atomics.emitCopyIntoMemory(RValue::get(value));
    return;
  }

  case TEK_Complex: {
    ComplexPairTy value = EmitComplexExpr(init);
    atomics.emitCopyIntoMemory(RValue::getComplex(value));
    return;
  }

  case TEK_Aggregate: {
    // Fix up the destination if the initializer isn't an expression
    // of atomic type.
    bool Zeroed = false;
    if (!init->getType()->isAtomicType()) {
      Zeroed = atomics.emitMemSetZeroIfNecessary();
      dest = atomics.projectValue();
    }

    // Evaluate the expression directly into the destination.
    AggValueSlot slot = AggValueSlot::forLValue(dest,
                                        AggValueSlot::IsNotDestructed,
                                        AggValueSlot::DoesNotNeedGCBarriers,
                                        AggValueSlot::IsNotAliased,
                                        Zeroed ? AggValueSlot::IsZeroed :
                                                 AggValueSlot::IsNotZeroed);

    EmitAggExpr(init, slot);
    return;
  }
  }
  llvm_unreachable("bad evaluation kind");
}
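
// Usage sketch (illustrative, not from this file): for a declaration like
//   _Atomic(int) x = 42;
// EmitAtomicInit takes the TEK_Scalar path and plain-stores the value;
// no atomic instruction is needed because the object is not yet visible
// to other threads.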