//===--- CGAtomic.cpp - Emit LLVM IR for atomic operations ----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the code for emitting atomic operations.
//
//===----------------------------------------------------------------------===//

#include "CodeGenFunction.h"
#include "CGCall.h"
#include "CGRecordLayout.h"
#include "CodeGenModule.h"
#include "clang/AST/ASTContext.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Operator.h"

using namespace clang;
using namespace CodeGen;

namespace {
  class AtomicInfo {
    CodeGenFunction &CGF;
    QualType AtomicTy;
    QualType ValueTy;
    uint64_t AtomicSizeInBits;
    uint64_t ValueSizeInBits;
    CharUnits AtomicAlign;
    CharUnits ValueAlign;
    CharUnits LValueAlign;
    TypeEvaluationKind EvaluationKind;
    bool UseLibcall;
    LValue LVal;
    CGBitFieldInfo BFI;
  public:
    AtomicInfo(CodeGenFunction &CGF, LValue &lvalue)
        : CGF(CGF), AtomicSizeInBits(0), ValueSizeInBits(0),
          EvaluationKind(TEK_Scalar), UseLibcall(true) {
      assert(!lvalue.isGlobalReg());
      ASTContext &C = CGF.getContext();
      if (lvalue.isSimple()) {
        AtomicTy = lvalue.getType();
        if (auto *ATy = AtomicTy->getAs<AtomicType>())
          ValueTy = ATy->getValueType();
        else
          ValueTy = AtomicTy;
        EvaluationKind = CGF.getEvaluationKind(ValueTy);

        uint64_t ValueAlignInBits;
        uint64_t AtomicAlignInBits;
        TypeInfo ValueTI = C.getTypeInfo(ValueTy);
        ValueSizeInBits = ValueTI.Width;
        ValueAlignInBits = ValueTI.Align;

        TypeInfo AtomicTI = C.getTypeInfo(AtomicTy);
        AtomicSizeInBits = AtomicTI.Width;
        AtomicAlignInBits = AtomicTI.Align;

        assert(ValueSizeInBits <= AtomicSizeInBits);
        assert(ValueAlignInBits <= AtomicAlignInBits);

        AtomicAlign = C.toCharUnitsFromBits(AtomicAlignInBits);
        ValueAlign = C.toCharUnitsFromBits(ValueAlignInBits);
        if (lvalue.getAlignment().isZero())
          lvalue.setAlignment(AtomicAlign);

        LVal = lvalue;
      } else if (lvalue.isBitField()) {
        ValueTy = lvalue.getType();
        ValueSizeInBits = C.getTypeSize(ValueTy);
        auto &OrigBFI = lvalue.getBitFieldInfo();
        auto Offset = OrigBFI.Offset % C.toBits(lvalue.getAlignment());
        AtomicSizeInBits = C.toBits(
            C.toCharUnitsFromBits(Offset + OrigBFI.Size + C.getCharWidth() - 1)
                .RoundUpToAlignment(lvalue.getAlignment()));
        auto VoidPtrAddr = CGF.EmitCastToVoidPtr(lvalue.getBitFieldAddr());
        auto OffsetInChars =
            (C.toCharUnitsFromBits(OrigBFI.Offset) / lvalue.getAlignment()) *
            lvalue.getAlignment();
        VoidPtrAddr = CGF.Builder.CreateConstGEP1_64(
            VoidPtrAddr, OffsetInChars.getQuantity());
        auto Addr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
            VoidPtrAddr,
            CGF.Builder.getIntNTy(AtomicSizeInBits)->getPointerTo(),
            "atomic_bitfield_base");
        BFI = OrigBFI;
        BFI.Offset = Offset;
        BFI.StorageSize = AtomicSizeInBits;
        LVal = LValue::MakeBitfield(Addr, BFI, lvalue.getType(),
                                    lvalue.getAlignment());
        LVal.setTBAAInfo(lvalue.getTBAAInfo());
        AtomicTy = C.getIntTypeForBitwidth(AtomicSizeInBits, OrigBFI.IsSigned);
        if (AtomicTy.isNull()) {
          llvm::APInt Size(
              /*numBits=*/32,
              C.toCharUnitsFromBits(AtomicSizeInBits).getQuantity());
          AtomicTy = C.getConstantArrayType(C.CharTy, Size, ArrayType::Normal,
                                            /*IndexTypeQuals=*/0);
        }
        AtomicAlign = ValueAlign = lvalue.getAlignment();
      } else if (lvalue.isVectorElt()) {
        ValueTy = lvalue.getType()->getAs<VectorType>()->getElementType();
        ValueSizeInBits = C.getTypeSize(ValueTy);
        AtomicTy = lvalue.getType();
        AtomicSizeInBits = C.getTypeSize(AtomicTy);
        AtomicAlign = ValueAlign = lvalue.getAlignment();
        LVal = lvalue;
      } else {
        assert(lvalue.isExtVectorElt());
        ValueTy = lvalue.getType();
        ValueSizeInBits = C.getTypeSize(ValueTy);
        AtomicTy = ValueTy = CGF.getContext().getExtVectorType(
            lvalue.getType(), lvalue.getExtVectorAddr()
                                  ->getType()
                                  ->getPointerElementType()
                                  ->getVectorNumElements());
        AtomicSizeInBits = C.getTypeSize(AtomicTy);
        AtomicAlign = ValueAlign = lvalue.getAlignment();
        LVal = lvalue;
      }
      UseLibcall = !C.getTargetInfo().hasBuiltinAtomic(
          AtomicSizeInBits, C.toBits(lvalue.getAlignment()));
    }

    QualType getAtomicType() const { return AtomicTy; }
    QualType getValueType() const { return ValueTy; }
    CharUnits getAtomicAlignment() const { return AtomicAlign; }
    CharUnits getValueAlignment() const { return ValueAlign; }
    uint64_t getAtomicSizeInBits() const { return AtomicSizeInBits; }
    uint64_t getValueSizeInBits() const { return ValueSizeInBits; }
    TypeEvaluationKind getEvaluationKind() const { return EvaluationKind; }
    bool shouldUseLibcall() const { return UseLibcall; }
    const LValue &getAtomicLValue() const { return LVal; }
    llvm::Value *getAtomicAddress() const {
      if (LVal.isSimple())
        return LVal.getAddress();
      else if (LVal.isBitField())
        return LVal.getBitFieldAddr();
      else if (LVal.isVectorElt())
        return LVal.getVectorAddr();
      assert(LVal.isExtVectorElt());
      return LVal.getExtVectorAddr();
    }

    /// Is the atomic size larger than the underlying value type?
    ///
    /// Note that the absence of padding does not mean that atomic
    /// objects are completely interchangeable with non-atomic
    /// objects: we might have promoted the alignment of a type
    /// without making it bigger.
    bool hasPadding() const {
      return (ValueSizeInBits != AtomicSizeInBits);
    }

    bool emitMemSetZeroIfNecessary() const;

    llvm::Value *getAtomicSizeValue() const {
      CharUnits size = CGF.getContext().toCharUnitsFromBits(AtomicSizeInBits);
      return CGF.CGM.getSize(size);
    }

    /// Cast the given pointer to an integer pointer suitable for
    /// atomic operations.
    llvm::Value *emitCastToAtomicIntPointer(llvm::Value *addr) const;

    /// Turn an atomic-layout object into an r-value.
    RValue convertTempToRValue(llvm::Value *addr, AggValueSlot resultSlot,
                               SourceLocation loc, bool AsValue) const;

    /// \brief Converts an rvalue to an integer value.
    llvm::Value *convertRValueToInt(RValue RVal) const;

    RValue ConvertIntToValueOrAtomic(llvm::Value *IntVal,
                                     AggValueSlot ResultSlot,
                                     SourceLocation Loc, bool AsValue) const;

    /// Copy an atomic r-value into atomic-layout memory.
    void emitCopyIntoMemory(RValue rvalue) const;

    /// Project an l-value down to the value field.
    LValue projectValue() const {
      assert(LVal.isSimple());
      llvm::Value *addr = getAtomicAddress();
      if (hasPadding())
        addr = CGF.Builder.CreateStructGEP(nullptr, addr, 0);

      return LValue::MakeAddr(addr, getValueType(), LVal.getAlignment(),
                              CGF.getContext(), LVal.getTBAAInfo());
    }

    /// \brief Emits an atomic load.
    /// \returns The loaded value.
    RValue EmitAtomicLoad(AggValueSlot ResultSlot, SourceLocation Loc,
                          bool AsValue, llvm::AtomicOrdering AO,
                          bool IsVolatile);

    /// \brief Emits an atomic compare-and-exchange sequence.
    /// \param Expected Expected value.
    /// \param Desired Desired value.
    /// \param Success Atomic ordering for the success case.
    /// \param Failure Atomic ordering for the failure case.
    /// \param IsWeak true if the atomic operation is weak, false otherwise.
    /// \returns A pair of values: the previous value from storage (value
    /// type) and a boolean flag (i1 type) that is true on success and false
    /// otherwise.
    std::pair<RValue, llvm::Value *> EmitAtomicCompareExchange(
        RValue Expected, RValue Desired,
        llvm::AtomicOrdering Success = llvm::SequentiallyConsistent,
        llvm::AtomicOrdering Failure = llvm::SequentiallyConsistent,
        bool IsWeak = false);

    /// \brief Emits an atomic update.
    /// \param AO Atomic ordering.
    /// \param UpdateOp Update operation for the current lvalue.
    void EmitAtomicUpdate(llvm::AtomicOrdering AO,
                          const llvm::function_ref<RValue(RValue)> &UpdateOp,
                          bool IsVolatile);
    /// \brief Emits an atomic update.
    /// \param AO Atomic ordering.
    void EmitAtomicUpdate(llvm::AtomicOrdering AO, RValue UpdateRVal,
                          bool IsVolatile);

    /// Materialize an atomic r-value in atomic-layout memory.
    llvm::Value *materializeRValue(RValue rvalue) const;

    /// \brief Translates LLVM atomic ordering to GNU atomic ordering for
    /// libcalls.
    static AtomicExpr::AtomicOrderingKind
    translateAtomicOrdering(const llvm::AtomicOrdering AO);

  private:
    bool requiresMemSetZero(llvm::Type *type) const;

    /// \brief Creates a temp alloca for intermediate operations on an atomic
    /// value.
    llvm::Value *CreateTempAlloca() const;

    /// \brief Emits an atomic load as a libcall.
    void EmitAtomicLoadLibcall(llvm::Value *AddForLoaded,
                               llvm::AtomicOrdering AO, bool IsVolatile);
    /// \brief Emits an atomic load as an LLVM instruction.
    llvm::Value *EmitAtomicLoadOp(llvm::AtomicOrdering AO, bool IsVolatile);
    /// \brief Emits an atomic compare-and-exchange op as a libcall.
    llvm::Value *EmitAtomicCompareExchangeLibcall(
        llvm::Value *ExpectedAddr, llvm::Value *DesiredAddr,
        llvm::AtomicOrdering Success = llvm::SequentiallyConsistent,
        llvm::AtomicOrdering Failure = llvm::SequentiallyConsistent);
    /// \brief Emits an atomic compare-and-exchange op as an LLVM instruction.
    std::pair<llvm::Value *, llvm::Value *> EmitAtomicCompareExchangeOp(
        llvm::Value *ExpectedVal, llvm::Value *DesiredVal,
        llvm::AtomicOrdering Success = llvm::SequentiallyConsistent,
        llvm::AtomicOrdering Failure = llvm::SequentiallyConsistent,
        bool IsWeak = false);
    /// \brief Emits an atomic update as libcalls.
    void
    EmitAtomicUpdateLibcall(llvm::AtomicOrdering AO,
                            const llvm::function_ref<RValue(RValue)> &UpdateOp,
                            bool IsVolatile);
    /// \brief Emits an atomic update as LLVM instructions.
    void EmitAtomicUpdateOp(llvm::AtomicOrdering AO,
                            const llvm::function_ref<RValue(RValue)> &UpdateOp,
                            bool IsVolatile);
    /// \brief Emits an atomic update as libcalls.
    void EmitAtomicUpdateLibcall(llvm::AtomicOrdering AO, RValue UpdateRVal,
                                 bool IsVolatile);
    /// \brief Emits an atomic update as LLVM instructions.
    void EmitAtomicUpdateOp(llvm::AtomicOrdering AO, RValue UpdateRVal,
                            bool IsVolatile);
  };
} // namespace

AtomicExpr::AtomicOrderingKind
AtomicInfo::translateAtomicOrdering(const llvm::AtomicOrdering AO) {
  switch (AO) {
  case llvm::Unordered:
  case llvm::NotAtomic:
  case llvm::Monotonic:
    return AtomicExpr::AO_ABI_memory_order_relaxed;
  case llvm::Acquire:
    return AtomicExpr::AO_ABI_memory_order_acquire;
  case llvm::Release:
    return AtomicExpr::AO_ABI_memory_order_release;
  case llvm::AcquireRelease:
    return AtomicExpr::AO_ABI_memory_order_acq_rel;
  case llvm::SequentiallyConsistent:
    return AtomicExpr::AO_ABI_memory_order_seq_cst;
  }
  llvm_unreachable("Unhandled AtomicOrdering");
}

llvm::Value *AtomicInfo::CreateTempAlloca() const {
  auto *TempAlloca = CGF.CreateMemTemp(
      (LVal.isBitField() && ValueSizeInBits > AtomicSizeInBits) ? ValueTy
                                                                : AtomicTy,
      "atomic-temp");
  TempAlloca->setAlignment(getAtomicAlignment().getQuantity());
  // Cast to pointer to value type for bitfields.
  if (LVal.isBitField())
    return CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
        TempAlloca, getAtomicAddress()->getType());
  return TempAlloca;
}

static RValue emitAtomicLibcall(CodeGenFunction &CGF,
                                StringRef fnName,
                                QualType resultType,
                                CallArgList &args) {
  const CGFunctionInfo &fnInfo =
      CGF.CGM.getTypes().arrangeFreeFunctionCall(resultType, args,
                                                 FunctionType::ExtInfo(),
                                                 RequiredArgs::All);
  llvm::FunctionType *fnTy = CGF.CGM.getTypes().GetFunctionType(fnInfo);
  llvm::Constant *fn = CGF.CGM.CreateRuntimeFunction(fnTy, fnName);
  return CGF.EmitCall(fnInfo, fn, ReturnValueSlot(), args);
}
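
// Illustrative note (added, not in the original source): for an atomic that
// is too wide to handle inline, e.g. a 16-byte _Atomic struct, the helper
// above ends up materializing a call along the lines of
//   __atomic_load(16, &obj, &tmp, /*seq_cst*/ 5)
// per the GNU atomic library ABI (http://gcc.gnu.org/wiki/Atomic/GCCMM/LIbrary).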

/// Does a store of the given IR type modify the full expected width?
static bool isFullSizeType(CodeGenModule &CGM, llvm::Type *type,
                           uint64_t expectedSize) {
  return (CGM.getDataLayout().getTypeStoreSize(type) * 8 == expectedSize);
}
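
// Example (added for illustration): on x86-64, x86_fp80 has a 10-byte store
// size inside a 16-byte _Atomic(long double), so a plain scalar store leaves
// six bytes unwritten and requiresMemSetZero() below must return true for it.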

/// Does the atomic type require memsetting to zero before initialization?
///
/// The IR type is provided as a way of making certain queries faster.
bool AtomicInfo::requiresMemSetZero(llvm::Type *type) const {
  // If the atomic type has size padding, we definitely need a memset.
  if (hasPadding()) return true;

  // Otherwise, do some simple heuristics to try to avoid it:
  switch (getEvaluationKind()) {
  // For scalars and complexes, check whether the store size of the
  // type uses the full size.
  case TEK_Scalar:
    return !isFullSizeType(CGF.CGM, type, AtomicSizeInBits);
  case TEK_Complex:
    return !isFullSizeType(CGF.CGM, type->getStructElementType(0),
                           AtomicSizeInBits / 2);

  // Padding in structs has an undefined bit pattern. User beware.
  case TEK_Aggregate:
    return false;
  }
  llvm_unreachable("bad evaluation kind");
}

bool AtomicInfo::emitMemSetZeroIfNecessary() const {
  assert(LVal.isSimple());
  llvm::Value *addr = LVal.getAddress();
  if (!requiresMemSetZero(addr->getType()->getPointerElementType()))
    return false;

  CGF.Builder.CreateMemSet(
      addr, llvm::ConstantInt::get(CGF.Int8Ty, 0),
      CGF.getContext().toCharUnitsFromBits(AtomicSizeInBits).getQuantity(),
      LVal.getAlignment().getQuantity());
  return true;
}

static void emitAtomicCmpXchg(CodeGenFunction &CGF, AtomicExpr *E, bool IsWeak,
                              llvm::Value *Dest, llvm::Value *Ptr,
                              llvm::Value *Val1, llvm::Value *Val2,
                              uint64_t Size, unsigned Align,
                              llvm::AtomicOrdering SuccessOrder,
                              llvm::AtomicOrdering FailureOrder) {
  llvm::LoadInst *Expected = CGF.Builder.CreateLoad(Val1);
  Expected->setAlignment(Align);
  llvm::LoadInst *Desired = CGF.Builder.CreateLoad(Val2);
  Desired->setAlignment(Align);

  llvm::AtomicCmpXchgInst *Pair = CGF.Builder.CreateAtomicCmpXchg(
      Ptr, Expected, Desired, SuccessOrder, FailureOrder);
  Pair->setVolatile(E->isVolatile());
  Pair->setWeak(IsWeak);

  // Cmp holds the result of the compare-exchange operation: true on success,
  // false on failure.
  llvm::Value *Old = CGF.Builder.CreateExtractValue(Pair, 0);
  llvm::Value *Cmp = CGF.Builder.CreateExtractValue(Pair, 1);

  // This basic block is used to hold the store instruction if the operation
  // failed.
  llvm::BasicBlock *StoreExpectedBB =
      CGF.createBasicBlock("cmpxchg.store_expected", CGF.CurFn);

  // This basic block is the exit point of the operation, we should end up
  // here regardless of whether or not the operation succeeded.
  llvm::BasicBlock *ContinueBB =
      CGF.createBasicBlock("cmpxchg.continue", CGF.CurFn);

  // Update Expected if Expected isn't equal to Old, otherwise branch to the
  // exit point.
  CGF.Builder.CreateCondBr(Cmp, ContinueBB, StoreExpectedBB);

  CGF.Builder.SetInsertPoint(StoreExpectedBB);
  // Update the memory at Expected with Old's value.
  llvm::StoreInst *StoreExpected = CGF.Builder.CreateStore(Old, Val1);
  StoreExpected->setAlignment(Align);
  // Finally, branch to the exit point.
  CGF.Builder.CreateBr(ContinueBB);

  CGF.Builder.SetInsertPoint(ContinueBB);
  // Update the memory at Dest with Cmp's value.
  CGF.EmitStoreOfScalar(Cmp, CGF.MakeAddrLValue(Dest, E->getType()));
  return;
}
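
// Sketch (added, not in the original) of the IR shape the helper above
// produces for a strong i32 compare-and-exchange:
//   %pair = cmpxchg i32* %ptr, i32 %expected, i32 %desired seq_cst seq_cst
//   %old  = extractvalue { i32, i1 } %pair, 0
//   %cmp  = extractvalue { i32, i1 } %pair, 1
//   br i1 %cmp, label %cmpxchg.continue, label %cmpxchg.store_expected
// where the failure block stores %old back into the 'expected' temporary.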

/// Given an ordering required on success, emit all possible cmpxchg
/// instructions to cope with the provided (but possibly only dynamically
/// known) FailureOrder.
static void emitAtomicCmpXchgFailureSet(CodeGenFunction &CGF, AtomicExpr *E,
                                        bool IsWeak, llvm::Value *Dest,
                                        llvm::Value *Ptr, llvm::Value *Val1,
                                        llvm::Value *Val2,
                                        llvm::Value *FailureOrderVal,
                                        uint64_t Size, unsigned Align,
                                        llvm::AtomicOrdering SuccessOrder) {
  llvm::AtomicOrdering FailureOrder;
  if (llvm::ConstantInt *FO = dyn_cast<llvm::ConstantInt>(FailureOrderVal)) {
    switch (FO->getSExtValue()) {
    default:
      FailureOrder = llvm::Monotonic;
      break;
    case AtomicExpr::AO_ABI_memory_order_consume:
    case AtomicExpr::AO_ABI_memory_order_acquire:
      FailureOrder = llvm::Acquire;
      break;
    case AtomicExpr::AO_ABI_memory_order_seq_cst:
      FailureOrder = llvm::SequentiallyConsistent;
      break;
    }
    if (FailureOrder >= SuccessOrder) {
      // Don't assert on undefined behaviour.
      FailureOrder =
          llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(SuccessOrder);
    }
    emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, Size, Align,
                      SuccessOrder, FailureOrder);
    return;
  }

  // Create all the relevant BBs.
  llvm::BasicBlock *MonotonicBB = nullptr, *AcquireBB = nullptr,
                   *SeqCstBB = nullptr;
  MonotonicBB = CGF.createBasicBlock("monotonic_fail", CGF.CurFn);
  if (SuccessOrder != llvm::Monotonic && SuccessOrder != llvm::Release)
    AcquireBB = CGF.createBasicBlock("acquire_fail", CGF.CurFn);
  if (SuccessOrder == llvm::SequentiallyConsistent)
    SeqCstBB = CGF.createBasicBlock("seqcst_fail", CGF.CurFn);

  llvm::BasicBlock *ContBB = CGF.createBasicBlock("atomic.continue", CGF.CurFn);

  llvm::SwitchInst *SI = CGF.Builder.CreateSwitch(FailureOrderVal, MonotonicBB);

  // Emit all the different atomics.

  // MonotonicBB is arbitrarily chosen as the default case; in practice, this
  // doesn't matter unless someone is crazy enough to use something that
  // doesn't fold to a constant for the ordering.
  CGF.Builder.SetInsertPoint(MonotonicBB);
  emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2,
                    Size, Align, SuccessOrder, llvm::Monotonic);
  CGF.Builder.CreateBr(ContBB);

  if (AcquireBB) {
    CGF.Builder.SetInsertPoint(AcquireBB);
    emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2,
                      Size, Align, SuccessOrder, llvm::Acquire);
    CGF.Builder.CreateBr(ContBB);
    SI->addCase(CGF.Builder.getInt32(AtomicExpr::AO_ABI_memory_order_consume),
                AcquireBB);
    SI->addCase(CGF.Builder.getInt32(AtomicExpr::AO_ABI_memory_order_acquire),
                AcquireBB);
  }
  if (SeqCstBB) {
    CGF.Builder.SetInsertPoint(SeqCstBB);
    emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2,
                      Size, Align, SuccessOrder, llvm::SequentiallyConsistent);
    CGF.Builder.CreateBr(ContBB);
    SI->addCase(CGF.Builder.getInt32(AtomicExpr::AO_ABI_memory_order_seq_cst),
                SeqCstBB);
  }

  CGF.Builder.SetInsertPoint(ContBB);
}
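
// Added note: when the failure ordering is only known at run time, the
// function above switches on the dynamic value and emits one cmpxchg per
// reachable failure ordering; a constant ordering argument instead takes the
// single-cmpxchg fast path at the top.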

static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, llvm::Value *Dest,
                         llvm::Value *Ptr, llvm::Value *Val1, llvm::Value *Val2,
                         llvm::Value *IsWeak, llvm::Value *FailureOrder,
                         uint64_t Size, unsigned Align,
                         llvm::AtomicOrdering Order) {
  llvm::AtomicRMWInst::BinOp Op = llvm::AtomicRMWInst::Add;
  llvm::Instruction::BinaryOps PostOp = (llvm::Instruction::BinaryOps)0;

  switch (E->getOp()) {
  case AtomicExpr::AO__c11_atomic_init:
    llvm_unreachable("Already handled!");

  case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
    emitAtomicCmpXchgFailureSet(CGF, E, false, Dest, Ptr, Val1, Val2,
                                FailureOrder, Size, Align, Order);
    return;
  case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
    emitAtomicCmpXchgFailureSet(CGF, E, true, Dest, Ptr, Val1, Val2,
                                FailureOrder, Size, Align, Order);
    return;
  case AtomicExpr::AO__atomic_compare_exchange:
  case AtomicExpr::AO__atomic_compare_exchange_n: {
    if (llvm::ConstantInt *IsWeakC = dyn_cast<llvm::ConstantInt>(IsWeak)) {
      emitAtomicCmpXchgFailureSet(CGF, E, IsWeakC->getZExtValue(), Dest, Ptr,
                                  Val1, Val2, FailureOrder, Size, Align, Order);
    } else {
      // Create all the relevant BBs.
      llvm::BasicBlock *StrongBB =
          CGF.createBasicBlock("cmpxchg.strong", CGF.CurFn);
      llvm::BasicBlock *WeakBB =
          CGF.createBasicBlock("cmpxchg.weak", CGF.CurFn);
      llvm::BasicBlock *ContBB =
          CGF.createBasicBlock("cmpxchg.continue", CGF.CurFn);

      llvm::SwitchInst *SI = CGF.Builder.CreateSwitch(IsWeak, WeakBB);
      SI->addCase(CGF.Builder.getInt1(false), StrongBB);

      CGF.Builder.SetInsertPoint(StrongBB);
      emitAtomicCmpXchgFailureSet(CGF, E, false, Dest, Ptr, Val1, Val2,
                                  FailureOrder, Size, Align, Order);
      CGF.Builder.CreateBr(ContBB);

      CGF.Builder.SetInsertPoint(WeakBB);
      emitAtomicCmpXchgFailureSet(CGF, E, true, Dest, Ptr, Val1, Val2,
                                  FailureOrder, Size, Align, Order);
      CGF.Builder.CreateBr(ContBB);

      CGF.Builder.SetInsertPoint(ContBB);
    }
    return;
  }
  case AtomicExpr::AO__c11_atomic_load:
  case AtomicExpr::AO__atomic_load_n:
  case AtomicExpr::AO__atomic_load: {
    llvm::LoadInst *Load = CGF.Builder.CreateLoad(Ptr);
    Load->setAtomic(Order);
    Load->setAlignment(Size);
    Load->setVolatile(E->isVolatile());
    llvm::StoreInst *StoreDest = CGF.Builder.CreateStore(Load, Dest);
    StoreDest->setAlignment(Align);
    return;
  }

  case AtomicExpr::AO__c11_atomic_store:
  case AtomicExpr::AO__atomic_store:
  case AtomicExpr::AO__atomic_store_n: {
    assert(!Dest && "Store does not return a value");
    llvm::LoadInst *LoadVal1 = CGF.Builder.CreateLoad(Val1);
    LoadVal1->setAlignment(Align);
    llvm::StoreInst *Store = CGF.Builder.CreateStore(LoadVal1, Ptr);
    Store->setAtomic(Order);
    Store->setAlignment(Size);
    Store->setVolatile(E->isVolatile());
    return;
  }

  case AtomicExpr::AO__c11_atomic_exchange:
  case AtomicExpr::AO__atomic_exchange_n:
  case AtomicExpr::AO__atomic_exchange:
    Op = llvm::AtomicRMWInst::Xchg;
    break;

  case AtomicExpr::AO__atomic_add_fetch:
    PostOp = llvm::Instruction::Add;
    // Fall through.
  case AtomicExpr::AO__c11_atomic_fetch_add:
  case AtomicExpr::AO__atomic_fetch_add:
    Op = llvm::AtomicRMWInst::Add;
    break;

  case AtomicExpr::AO__atomic_sub_fetch:
    PostOp = llvm::Instruction::Sub;
    // Fall through.
  case AtomicExpr::AO__c11_atomic_fetch_sub:
  case AtomicExpr::AO__atomic_fetch_sub:
    Op = llvm::AtomicRMWInst::Sub;
    break;

  case AtomicExpr::AO__atomic_and_fetch:
    PostOp = llvm::Instruction::And;
    // Fall through.
  case AtomicExpr::AO__c11_atomic_fetch_and:
  case AtomicExpr::AO__atomic_fetch_and:
    Op = llvm::AtomicRMWInst::And;
    break;

  case AtomicExpr::AO__atomic_or_fetch:
    PostOp = llvm::Instruction::Or;
    // Fall through.
  case AtomicExpr::AO__c11_atomic_fetch_or:
  case AtomicExpr::AO__atomic_fetch_or:
    Op = llvm::AtomicRMWInst::Or;
    break;

  case AtomicExpr::AO__atomic_xor_fetch:
    PostOp = llvm::Instruction::Xor;
    // Fall through.
  case AtomicExpr::AO__c11_atomic_fetch_xor:
  case AtomicExpr::AO__atomic_fetch_xor:
    Op = llvm::AtomicRMWInst::Xor;
    break;

  case AtomicExpr::AO__atomic_nand_fetch:
    PostOp = llvm::Instruction::And;
    // Fall through.
  case AtomicExpr::AO__atomic_fetch_nand:
    Op = llvm::AtomicRMWInst::Nand;
    break;
  }

  llvm::LoadInst *LoadVal1 = CGF.Builder.CreateLoad(Val1);
  LoadVal1->setAlignment(Align);
  llvm::AtomicRMWInst *RMWI =
      CGF.Builder.CreateAtomicRMW(Op, Ptr, LoadVal1, Order);
  RMWI->setVolatile(E->isVolatile());

  // For __atomic_*_fetch operations, perform the operation again to
  // determine the value which was written.
  llvm::Value *Result = RMWI;
  if (PostOp)
    Result = CGF.Builder.CreateBinOp(PostOp, RMWI, LoadVal1);
  if (E->getOp() == AtomicExpr::AO__atomic_nand_fetch)
    Result = CGF.Builder.CreateNot(Result);
  llvm::StoreInst *StoreDest = CGF.Builder.CreateStore(Result, Dest);
  StoreDest->setAlignment(Align);
}
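
// Illustrative IR (added, not in the original) for __atomic_add_fetch on an
// i32: atomicrmw yields the old value, so the new value is recomputed:
//   %old = atomicrmw add i32* %ptr, i32 %val seq_cst
//   %new = add i32 %old, %val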

// This function emits any expression (scalar, complex, or aggregate)
// into a temporary alloca.
static llvm::Value *
EmitValToTemp(CodeGenFunction &CGF, Expr *E) {
  llvm::Value *DeclPtr = CGF.CreateMemTemp(E->getType(), ".atomictmp");
  CGF.EmitAnyExprToMem(E, DeclPtr, E->getType().getQualifiers(),
                       /*Init*/ true);
  return DeclPtr;
}

static void
AddDirectArgument(CodeGenFunction &CGF, CallArgList &Args,
                  bool UseOptimizedLibcall, llvm::Value *Val, QualType ValTy,
                  SourceLocation Loc, CharUnits SizeInChars) {
  if (UseOptimizedLibcall) {
    // Load value and pass it to the function directly.
    unsigned Align = CGF.getContext().getTypeAlignInChars(ValTy).getQuantity();
    int64_t SizeInBits = CGF.getContext().toBits(SizeInChars);
    ValTy =
        CGF.getContext().getIntTypeForBitwidth(SizeInBits, /*Signed=*/false);
    llvm::Type *IPtrTy = llvm::IntegerType::get(CGF.getLLVMContext(),
                                                SizeInBits)->getPointerTo();
    Val = CGF.EmitLoadOfScalar(CGF.Builder.CreateBitCast(Val, IPtrTy), false,
                               Align, CGF.getContext().getPointerType(ValTy),
                               Loc);
    // Coerce the value into an appropriately sized integer type.
    Args.add(RValue::get(Val), ValTy);
  } else {
    // Non-optimized functions always take a reference.
    Args.add(RValue::get(CGF.EmitCastToVoidPtr(Val)),
             CGF.getContext().VoidPtrTy);
  }
}
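
// Example (added note): lowering __atomic_exchange_n on a float through the
// optimized path reloads the operand through an i32* and passes it as a plain
// i32, matching the by-value signature T __atomic_exchange_4(T *, T, int).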

RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) {
  QualType AtomicTy = E->getPtr()->getType()->getPointeeType();
  QualType MemTy = AtomicTy;
  if (const AtomicType *AT = AtomicTy->getAs<AtomicType>())
    MemTy = AT->getValueType();
  CharUnits sizeChars = getContext().getTypeSizeInChars(AtomicTy);
  uint64_t Size = sizeChars.getQuantity();
  CharUnits alignChars = getContext().getTypeAlignInChars(AtomicTy);
  unsigned Align = alignChars.getQuantity();
  unsigned MaxInlineWidthInBits =
      getTarget().getMaxAtomicInlineWidth();
  bool UseLibcall = (Size != Align ||
                     getContext().toBits(sizeChars) > MaxInlineWidthInBits);
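  // Added note: the libcall path triggers when the object is misaligned for
  // its size or wider than the target's inline atomic width, e.g. a 16-byte
  // _Atomic struct on a target whose getMaxAtomicInlineWidth() is 64 bits.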

  llvm::Value *IsWeak = nullptr, *OrderFail = nullptr, *Val1 = nullptr,
              *Val2 = nullptr;
  llvm::Value *Ptr = EmitScalarExpr(E->getPtr());

  if (E->getOp() == AtomicExpr::AO__c11_atomic_init) {
    assert(!Dest && "Init does not return a value");
    LValue lvalue = LValue::MakeAddr(Ptr, AtomicTy, alignChars, getContext());
    EmitAtomicInit(E->getVal1(), lvalue);
    return RValue::get(nullptr);
  }

  llvm::Value *Order = EmitScalarExpr(E->getOrder());

  switch (E->getOp()) {
  case AtomicExpr::AO__c11_atomic_init:
    llvm_unreachable("Already handled!");

  case AtomicExpr::AO__c11_atomic_load:
  case AtomicExpr::AO__atomic_load_n:
    break;

  case AtomicExpr::AO__atomic_load:
    Dest = EmitScalarExpr(E->getVal1());
    break;

  case AtomicExpr::AO__atomic_store:
    Val1 = EmitScalarExpr(E->getVal1());
    break;

  case AtomicExpr::AO__atomic_exchange:
    Val1 = EmitScalarExpr(E->getVal1());
    Dest = EmitScalarExpr(E->getVal2());
    break;

  case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
  case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
  case AtomicExpr::AO__atomic_compare_exchange_n:
  case AtomicExpr::AO__atomic_compare_exchange:
    Val1 = EmitScalarExpr(E->getVal1());
    if (E->getOp() == AtomicExpr::AO__atomic_compare_exchange)
      Val2 = EmitScalarExpr(E->getVal2());
    else
      Val2 = EmitValToTemp(*this, E->getVal2());
    OrderFail = EmitScalarExpr(E->getOrderFail());
    if (E->getNumSubExprs() == 6)
      IsWeak = EmitScalarExpr(E->getWeak());
    break;

  case AtomicExpr::AO__c11_atomic_fetch_add:
  case AtomicExpr::AO__c11_atomic_fetch_sub:
    if (MemTy->isPointerType()) {
      // For pointer arithmetic, we're required to do a bit of math:
      // adding 1 to an int* is not the same as adding 1 to a uintptr_t.
      // ... but only for the C11 builtins. The GNU builtins expect the
      // user to multiply by sizeof(T).
      QualType Val1Ty = E->getVal1()->getType();
      llvm::Value *Val1Scalar = EmitScalarExpr(E->getVal1());
      CharUnits PointeeIncAmt =
          getContext().getTypeSizeInChars(MemTy->getPointeeType());
      Val1Scalar = Builder.CreateMul(Val1Scalar, CGM.getSize(PointeeIncAmt));
      Val1 = CreateMemTemp(Val1Ty, ".atomictmp");
      EmitStoreOfScalar(Val1Scalar, MakeAddrLValue(Val1, Val1Ty));
      break;
    }
    // Fall through.
  case AtomicExpr::AO__atomic_fetch_add:
  case AtomicExpr::AO__atomic_fetch_sub:
  case AtomicExpr::AO__atomic_add_fetch:
  case AtomicExpr::AO__atomic_sub_fetch:
  case AtomicExpr::AO__c11_atomic_store:
  case AtomicExpr::AO__c11_atomic_exchange:
  case AtomicExpr::AO__atomic_store_n:
  case AtomicExpr::AO__atomic_exchange_n:
  case AtomicExpr::AO__c11_atomic_fetch_and:
  case AtomicExpr::AO__c11_atomic_fetch_or:
  case AtomicExpr::AO__c11_atomic_fetch_xor:
  case AtomicExpr::AO__atomic_fetch_and:
  case AtomicExpr::AO__atomic_fetch_or:
  case AtomicExpr::AO__atomic_fetch_xor:
  case AtomicExpr::AO__atomic_fetch_nand:
  case AtomicExpr::AO__atomic_and_fetch:
  case AtomicExpr::AO__atomic_or_fetch:
  case AtomicExpr::AO__atomic_xor_fetch:
  case AtomicExpr::AO__atomic_nand_fetch:
    Val1 = EmitValToTemp(*this, E->getVal1());
    break;
  }
773
David Majnemeree8d04d2014-12-12 08:16:09 +0000774 QualType RValTy = E->getType().getUnqualifiedType();
775
David Majnemer659be552014-11-25 23:44:32 +0000776 auto GetDest = [&] {
David Majnemeree8d04d2014-12-12 08:16:09 +0000777 if (!RValTy->isVoidType() && !Dest) {
778 Dest = CreateMemTemp(RValTy, ".atomicdst");
779 }
David Majnemer659be552014-11-25 23:44:32 +0000780 return Dest;
781 };

  // Use a library call. See: http://gcc.gnu.org/wiki/Atomic/GCCMM/LIbrary .
  if (UseLibcall) {
    bool UseOptimizedLibcall = false;
    switch (E->getOp()) {
    case AtomicExpr::AO__c11_atomic_fetch_add:
    case AtomicExpr::AO__atomic_fetch_add:
    case AtomicExpr::AO__c11_atomic_fetch_and:
    case AtomicExpr::AO__atomic_fetch_and:
    case AtomicExpr::AO__c11_atomic_fetch_or:
    case AtomicExpr::AO__atomic_fetch_or:
    case AtomicExpr::AO__c11_atomic_fetch_sub:
    case AtomicExpr::AO__atomic_fetch_sub:
    case AtomicExpr::AO__c11_atomic_fetch_xor:
    case AtomicExpr::AO__atomic_fetch_xor:
      // For these, only library calls for certain sizes exist.
      UseOptimizedLibcall = true;
      break;
    default:
      // Only use optimized library calls for sizes for which they exist.
      if (Size == 1 || Size == 2 || Size == 4 || Size == 8)
        UseOptimizedLibcall = true;
      break;
    }
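
    // Added note: the "optimized" libcalls are the size-suffixed variants,
    // e.g. __atomic_fetch_add_4 or __atomic_load_8, which pass and return T
    // by value; the generic __atomic_* entry points take an explicit size
    // and work through pointers instead.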

    CallArgList Args;
    if (!UseOptimizedLibcall) {
      // For non-optimized library calls, the size is the first parameter.
      Args.add(RValue::get(llvm::ConstantInt::get(SizeTy, Size)),
               getContext().getSizeType());
    }
    // The atomic address is the first or second parameter.
    Args.add(RValue::get(EmitCastToVoidPtr(Ptr)), getContext().VoidPtrTy);

    std::string LibCallName;
    QualType LoweredMemTy =
        MemTy->isPointerType() ? getContext().getIntPtrType() : MemTy;
    QualType RetTy;
    bool HaveRetTy = false;
    switch (E->getOp()) {
    // There is only one libcall for compare and exchange, because there is
    // no optimisation benefit possible from a libcall version of a weak
    // compare and exchange.
    // bool __atomic_compare_exchange(size_t size, void *mem, void *expected,
    //                                void *desired, int success, int failure)
    // bool __atomic_compare_exchange_N(T *mem, T *expected, T desired,
    //                                  int success, int failure)
    case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
    case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
    case AtomicExpr::AO__atomic_compare_exchange:
    case AtomicExpr::AO__atomic_compare_exchange_n:
      LibCallName = "__atomic_compare_exchange";
      RetTy = getContext().BoolTy;
      HaveRetTy = true;
      Args.add(RValue::get(EmitCastToVoidPtr(Val1)), getContext().VoidPtrTy);
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val2, MemTy,
                        E->getExprLoc(), sizeChars);
      Args.add(RValue::get(Order), getContext().IntTy);
      Order = OrderFail;
      break;
    // void __atomic_exchange(size_t size, void *mem, void *val, void *return,
    //                        int order)
    // T __atomic_exchange_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_exchange:
    case AtomicExpr::AO__atomic_exchange_n:
    case AtomicExpr::AO__atomic_exchange:
      LibCallName = "__atomic_exchange";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
                        E->getExprLoc(), sizeChars);
      break;
    // void __atomic_store(size_t size, void *mem, void *val, int order)
    // void __atomic_store_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_store:
    case AtomicExpr::AO__atomic_store:
    case AtomicExpr::AO__atomic_store_n:
      LibCallName = "__atomic_store";
      RetTy = getContext().VoidTy;
      HaveRetTy = true;
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
                        E->getExprLoc(), sizeChars);
      break;
    // void __atomic_load(size_t size, void *mem, void *return, int order)
    // T __atomic_load_N(T *mem, int order)
    case AtomicExpr::AO__c11_atomic_load:
    case AtomicExpr::AO__atomic_load:
    case AtomicExpr::AO__atomic_load_n:
      LibCallName = "__atomic_load";
      break;
    // T __atomic_fetch_add_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_fetch_add:
    case AtomicExpr::AO__atomic_fetch_add:
      LibCallName = "__atomic_fetch_add";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, LoweredMemTy,
                        E->getExprLoc(), sizeChars);
      break;
    // T __atomic_fetch_and_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_fetch_and:
    case AtomicExpr::AO__atomic_fetch_and:
      LibCallName = "__atomic_fetch_and";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
                        E->getExprLoc(), sizeChars);
      break;
    // T __atomic_fetch_or_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_fetch_or:
    case AtomicExpr::AO__atomic_fetch_or:
      LibCallName = "__atomic_fetch_or";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
                        E->getExprLoc(), sizeChars);
      break;
    // T __atomic_fetch_sub_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_fetch_sub:
    case AtomicExpr::AO__atomic_fetch_sub:
      LibCallName = "__atomic_fetch_sub";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, LoweredMemTy,
                        E->getExprLoc(), sizeChars);
      break;
    // T __atomic_fetch_xor_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_fetch_xor:
    case AtomicExpr::AO__atomic_fetch_xor:
      LibCallName = "__atomic_fetch_xor";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
                        E->getExprLoc(), sizeChars);
      break;
    default: return EmitUnsupportedRValue(E, "atomic library call");
    }

    // Optimized functions have the size in their name.
    if (UseOptimizedLibcall)
      LibCallName += "_" + llvm::utostr(Size);
    // By default, assume we return a value of the atomic type.
    if (!HaveRetTy) {
      if (UseOptimizedLibcall) {
        // Value is returned directly.
        // The function returns an appropriately sized integer type.
        RetTy = getContext().getIntTypeForBitwidth(
            getContext().toBits(sizeChars), /*Signed=*/false);
      } else {
        // Value is returned through a parameter before the order.
        RetTy = getContext().VoidTy;
        Args.add(RValue::get(EmitCastToVoidPtr(Dest)), getContext().VoidPtrTy);
      }
    }
    // The order is always the last parameter.
    Args.add(RValue::get(Order),
             getContext().IntTy);

    RValue Res = emitAtomicLibcall(*this, LibCallName, RetTy, Args);
    // The value is returned directly from the libcall.
    if (HaveRetTy && !RetTy->isVoidType())
      return Res;
    // The value is returned via an explicit out param.
    if (RetTy->isVoidType())
      return RValue::get(nullptr);
    // The value is returned directly for optimized libcalls but the caller
    // expected an out-param.
    if (UseOptimizedLibcall) {
      llvm::Value *ResVal = Res.getScalarVal();
      llvm::StoreInst *StoreDest = Builder.CreateStore(
          ResVal,
          Builder.CreateBitCast(GetDest(), ResVal->getType()->getPointerTo()));
      StoreDest->setAlignment(Align);
    }
    return convertTempToRValue(Dest, RValTy, E->getExprLoc());
  }

  bool IsStore = E->getOp() == AtomicExpr::AO__c11_atomic_store ||
                 E->getOp() == AtomicExpr::AO__atomic_store ||
                 E->getOp() == AtomicExpr::AO__atomic_store_n;
  bool IsLoad = E->getOp() == AtomicExpr::AO__c11_atomic_load ||
                E->getOp() == AtomicExpr::AO__atomic_load ||
                E->getOp() == AtomicExpr::AO__atomic_load_n;

  llvm::Type *ITy =
      llvm::IntegerType::get(getLLVMContext(), Size * 8);
  llvm::Value *OrigDest = GetDest();
  Ptr = Builder.CreateBitCast(
      Ptr, ITy->getPointerTo(Ptr->getType()->getPointerAddressSpace()));
  if (Val1) Val1 = Builder.CreateBitCast(Val1, ITy->getPointerTo());
  if (Val2) Val2 = Builder.CreateBitCast(Val2, ITy->getPointerTo());
  if (Dest && !E->isCmpXChg())
    Dest = Builder.CreateBitCast(Dest, ITy->getPointerTo());

  if (isa<llvm::ConstantInt>(Order)) {
    int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
    switch (ord) {
    case AtomicExpr::AO_ABI_memory_order_relaxed:
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
                   Size, Align, llvm::Monotonic);
      break;
    case AtomicExpr::AO_ABI_memory_order_consume:
    case AtomicExpr::AO_ABI_memory_order_acquire:
      if (IsStore)
        break; // Avoid crashing on code with undefined behavior
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
                   Size, Align, llvm::Acquire);
      break;
    case AtomicExpr::AO_ABI_memory_order_release:
      if (IsLoad)
        break; // Avoid crashing on code with undefined behavior
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
                   Size, Align, llvm::Release);
      break;
    case AtomicExpr::AO_ABI_memory_order_acq_rel:
      if (IsLoad || IsStore)
        break; // Avoid crashing on code with undefined behavior
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
                   Size, Align, llvm::AcquireRelease);
      break;
    case AtomicExpr::AO_ABI_memory_order_seq_cst:
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
                   Size, Align, llvm::SequentiallyConsistent);
      break;
    default: // invalid order
      // We should not ever get here normally, but it's hard to
      // enforce that in general.
      break;
    }
    if (RValTy->isVoidType())
      return RValue::get(nullptr);
    return convertTempToRValue(OrigDest, RValTy, E->getExprLoc());
  }

  // Long case, when Order isn't obviously constant.

  // Create all the relevant BBs.
  llvm::BasicBlock *MonotonicBB = nullptr, *AcquireBB = nullptr,
                   *ReleaseBB = nullptr, *AcqRelBB = nullptr,
                   *SeqCstBB = nullptr;
  MonotonicBB = createBasicBlock("monotonic", CurFn);
  if (!IsStore)
    AcquireBB = createBasicBlock("acquire", CurFn);
  if (!IsLoad)
    ReleaseBB = createBasicBlock("release", CurFn);
  if (!IsLoad && !IsStore)
    AcqRelBB = createBasicBlock("acqrel", CurFn);
  SeqCstBB = createBasicBlock("seqcst", CurFn);
  llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);

  // Create the switch for the split.
  // MonotonicBB is arbitrarily chosen as the default case; in practice, this
  // doesn't matter unless someone is crazy enough to use something that
  // doesn't fold to a constant for the ordering.
  Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
  llvm::SwitchInst *SI = Builder.CreateSwitch(Order, MonotonicBB);

  // Emit all the different atomics.
  Builder.SetInsertPoint(MonotonicBB);
  EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
               Size, Align, llvm::Monotonic);
  Builder.CreateBr(ContBB);
  if (!IsStore) {
    Builder.SetInsertPoint(AcquireBB);
    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
                 Size, Align, llvm::Acquire);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32(AtomicExpr::AO_ABI_memory_order_consume),
                AcquireBB);
    SI->addCase(Builder.getInt32(AtomicExpr::AO_ABI_memory_order_acquire),
                AcquireBB);
  }
  if (!IsLoad) {
    Builder.SetInsertPoint(ReleaseBB);
    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
                 Size, Align, llvm::Release);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32(AtomicExpr::AO_ABI_memory_order_release),
                ReleaseBB);
  }
  if (!IsLoad && !IsStore) {
    Builder.SetInsertPoint(AcqRelBB);
    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
                 Size, Align, llvm::AcquireRelease);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32(AtomicExpr::AO_ABI_memory_order_acq_rel),
                AcqRelBB);
  }
  Builder.SetInsertPoint(SeqCstBB);
  EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
               Size, Align, llvm::SequentiallyConsistent);
  Builder.CreateBr(ContBB);
  SI->addCase(Builder.getInt32(AtomicExpr::AO_ABI_memory_order_seq_cst),
              SeqCstBB);

  // Cleanup and return.
  Builder.SetInsertPoint(ContBB);
  if (RValTy->isVoidType())
    return RValue::get(nullptr);
  return convertTempToRValue(OrigDest, RValTy, E->getExprLoc());
}
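
// Usage sketch (added, not in the original): for source such as
//   _Atomic(int) counter;
//   __c11_atomic_fetch_add(&counter, 1, memory_order_seq_cst);
// the ordering is a constant, so EmitAtomicExpr above emits a single
//   atomicrmw add i32* %counter, i32 1 seq_cst
// and never builds the ordering switch.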

llvm::Value *AtomicInfo::emitCastToAtomicIntPointer(llvm::Value *addr) const {
  unsigned addrspace =
      cast<llvm::PointerType>(addr->getType())->getAddressSpace();
  llvm::IntegerType *ty =
      llvm::IntegerType::get(CGF.getLLVMContext(), AtomicSizeInBits);
  return CGF.Builder.CreateBitCast(addr, ty->getPointerTo(addrspace));
}
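
// Example (added note): for _Atomic(float) this rebitcasts the address to
// i32* so that the atomic load and store instructions can operate on an
// integer of the full atomic width.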

RValue AtomicInfo::convertTempToRValue(llvm::Value *addr,
                                       AggValueSlot resultSlot,
                                       SourceLocation loc,
                                       bool AsValue) const {
  if (LVal.isSimple()) {
    if (EvaluationKind == TEK_Aggregate)
      return resultSlot.asRValue();

    // Drill into the padding structure if we have one.
    if (hasPadding())
      addr = CGF.Builder.CreateStructGEP(nullptr, addr, 0);

    // Otherwise, just convert the temporary to an r-value using the
    // normal conversion routine.
    return CGF.convertTempToRValue(addr, getValueType(), loc);
  }
  if (!AsValue)
    // Get an RValue from temp memory as atomic for non-simple lvalues.
    return RValue::get(
        CGF.Builder.CreateAlignedLoad(addr, AtomicAlign.getQuantity()));
  if (LVal.isBitField())
    return CGF.EmitLoadOfBitfieldLValue(LValue::MakeBitfield(
        addr, LVal.getBitFieldInfo(), LVal.getType(), LVal.getAlignment()));
  if (LVal.isVectorElt())
    return CGF.EmitLoadOfLValue(LValue::MakeVectorElt(addr, LVal.getVectorIdx(),
                                                      LVal.getType(),
                                                      LVal.getAlignment()),
                                loc);
  assert(LVal.isExtVectorElt());
  return CGF.EmitLoadOfExtVectorElementLValue(LValue::MakeExtVectorElt(
      addr, LVal.getExtVectorElts(), LVal.getType(), LVal.getAlignment()));
}

RValue AtomicInfo::ConvertIntToValueOrAtomic(llvm::Value *IntVal,
                                             AggValueSlot ResultSlot,
                                             SourceLocation Loc,
                                             bool AsValue) const {
  // Try to avoid a round-trip through memory in some easy cases.
  assert(IntVal->getType()->isIntegerTy() && "Expected integer value");
  if (getEvaluationKind() == TEK_Scalar &&
      (((!LVal.isBitField() ||
         LVal.getBitFieldInfo().Size == ValueSizeInBits) &&
        !hasPadding()) ||
       !AsValue)) {
    auto *ValTy = AsValue
                      ? CGF.ConvertTypeForMem(ValueTy)
                      : getAtomicAddress()->getType()->getPointerElementType();
    if (ValTy->isIntegerTy()) {
      assert(IntVal->getType() == ValTy && "Different integer types.");
      return RValue::get(CGF.EmitFromMemory(IntVal, ValueTy));
    } else if (ValTy->isPointerTy())
      return RValue::get(CGF.Builder.CreateIntToPtr(IntVal, ValTy));
    else if (llvm::CastInst::isBitCastable(IntVal->getType(), ValTy))
      return RValue::get(CGF.Builder.CreateBitCast(IntVal, ValTy));
  }

  // Create a temporary. This needs to be big enough to hold the
  // atomic integer.
  llvm::Value *Temp;
  bool TempIsVolatile = false;
  CharUnits TempAlignment;
  if (AsValue && getEvaluationKind() == TEK_Aggregate) {
    assert(!ResultSlot.isIgnored());
    Temp = ResultSlot.getAddr();
    TempAlignment = getValueAlignment();
    TempIsVolatile = ResultSlot.isVolatile();
  } else {
    Temp = CreateTempAlloca();
    TempAlignment = getAtomicAlignment();
  }

  // Slam the integer into the temporary.
  llvm::Value *CastTemp = emitCastToAtomicIntPointer(Temp);
  CGF.Builder.CreateAlignedStore(IntVal, CastTemp, TempAlignment.getQuantity())
      ->setVolatile(TempIsVolatile);

  return convertTempToRValue(Temp, ResultSlot, Loc, AsValue);
}
1157
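/// Load the atomic object through the generic libatomic entry point.
///
/// This is the size-parameterized form, which always returns its result
/// through memory, so AddrForLoaded must point at storage of the full atomic
/// size. (The ignored bool parameter presumably mirrors the IsVolatile flag
/// of the native path; volatility is irrelevant across a libcall.)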
void AtomicInfo::EmitAtomicLoadLibcall(llvm::Value *AddrForLoaded,
                                       llvm::AtomicOrdering AO, bool) {
  // void __atomic_load(size_t size, void *mem, void *return, int order);
  CallArgList Args;
  Args.add(RValue::get(getAtomicSizeValue()), CGF.getContext().getSizeType());
  Args.add(RValue::get(CGF.EmitCastToVoidPtr(getAtomicAddress())),
           CGF.getContext().VoidPtrTy);
  Args.add(RValue::get(CGF.EmitCastToVoidPtr(AddrForLoaded)),
           CGF.getContext().VoidPtrTy);
  Args.add(RValue::get(
               llvm::ConstantInt::get(CGF.IntTy, translateAtomicOrdering(AO))),
           CGF.getContext().IntTy);
  emitAtomicLibcall(CGF, "__atomic_load", CGF.getContext().VoidTy, Args);
}

llvm::Value *AtomicInfo::EmitAtomicLoadOp(llvm::AtomicOrdering AO,
                                          bool IsVolatile) {
  // Okay, we're doing this natively.
  llvm::Value *Addr = emitCastToAtomicIntPointer(getAtomicAddress());
  llvm::LoadInst *Load = CGF.Builder.CreateLoad(Addr, "atomic-load");
  Load->setAtomic(AO);

  // Other decoration.
  Load->setAlignment(getAtomicAlignment().getQuantity());
  if (IsVolatile)
    Load->setVolatile(true);
  if (LVal.getTBAAInfo())
    CGF.CGM.DecorateInstruction(Load, LVal.getTBAAInfo());
  return Load;
}

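// Background for the two /volatile:ms helpers below (a sketch of the MSVC
// semantics, not code from this file): under the /volatile:ms model a
// volatile access doubles as a synchronization point, e.g.
//
//   volatile long Flag;
//   while (!Flag) { }   // each read behaves as an acquire load
//   Flag = 1;           // each write behaves as a release store
//
// but only when the access is small and aligned enough to be lock-free;
// otherwise it stays an ordinary volatile access rather than growing a
// libcall.
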
/// An LValue is a candidate for having its loads and stores be made atomic if
/// we are operating under /volatile:ms *and* the LValue itself is volatile and
/// such an operation can be performed without a libcall.
bool CodeGenFunction::LValueIsSuitableForInlineAtomic(LValue LV) {
  AtomicInfo AI(*this, LV);
  bool IsVolatile = LV.isVolatile() || hasVolatileMember(LV.getType());
  // An atomic is inline if we don't need to use a libcall.
  bool AtomicIsInline = !AI.shouldUseLibcall();
  return CGM.getCodeGenOpts().MSVolatile && IsVolatile && AtomicIsInline;
}

/// A type is a candidate for having its loads and stores be made atomic if
/// we are operating under /volatile:ms *and* we know the access is volatile
/// and such an operation can be performed without a libcall.
bool CodeGenFunction::typeIsSuitableForInlineAtomic(QualType Ty,
                                                    bool IsVolatile) const {
  // An atomic is inline if we don't need to use a libcall (e.g. it is builtin).
  bool AtomicIsInline = getContext().getTargetInfo().hasBuiltinAtomic(
      getContext().getTypeSize(Ty), getContext().getTypeAlign(Ty));
  return CGM.getCodeGenOpts().MSVolatile && IsVolatile && AtomicIsInline;
}

RValue CodeGenFunction::EmitAtomicLoad(LValue LV, SourceLocation SL,
                                       AggValueSlot Slot) {
  llvm::AtomicOrdering AO;
  bool IsVolatile = LV.isVolatileQualified();
  if (LV.getType()->isAtomicType()) {
    // Proper atomic loads are sequentially consistent by default.
    AO = llvm::SequentiallyConsistent;
  } else {
    // The /volatile:ms path lowers to an acquire load of a volatile object.
    AO = llvm::Acquire;
    IsVolatile = true;
  }
  return EmitAtomicLoad(LV, SL, AO, IsVolatile, Slot);
}

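/// Load the atomic object, dispatching between the libcall and native
/// paths. On the libcall path the result is produced in a temporary (or
/// directly in the result slot for simple aggregates) and converted with
/// convertTempToRValue; on the native path the loaded integer is converted
/// back with ConvertIntToValueOrAtomic.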
RValue AtomicInfo::EmitAtomicLoad(AggValueSlot ResultSlot, SourceLocation Loc,
                                  bool AsValue, llvm::AtomicOrdering AO,
                                  bool IsVolatile) {
  // Check whether we should use a library call.
  if (shouldUseLibcall()) {
    llvm::Value *TempAddr;
    if (LVal.isSimple() && !ResultSlot.isIgnored()) {
      assert(getEvaluationKind() == TEK_Aggregate);
      TempAddr = ResultSlot.getAddr();
    } else
      TempAddr = CreateTempAlloca();

    EmitAtomicLoadLibcall(TempAddr, AO, IsVolatile);

    // Okay, turn that back into the original value or whole atomic (for
    // non-simple lvalues) type.
    return convertTempToRValue(TempAddr, ResultSlot, Loc, AsValue);
  }

  // Okay, we're doing this natively.
  auto *Load = EmitAtomicLoadOp(AO, IsVolatile);

  // If we're ignoring an aggregate return, don't do anything.
  if (getEvaluationKind() == TEK_Aggregate && ResultSlot.isIgnored())
    return RValue::getAggregate(nullptr, false);

  // Okay, turn that back into the original value or atomic (for non-simple
  // lvalues) type.
  return ConvertIntToValueOrAtomic(Load, ResultSlot, Loc, AsValue);
}

/// Emit a load from an l-value of atomic type.  Note that the r-value
/// we produce is an r-value of the atomic *value* type.
RValue CodeGenFunction::EmitAtomicLoad(LValue src, SourceLocation loc,
                                       llvm::AtomicOrdering AO, bool IsVolatile,
                                       AggValueSlot resultSlot) {
  AtomicInfo Atomics(*this, src);
  return Atomics.EmitAtomicLoad(resultSlot, loc, /*AsValue=*/true, AO,
                                IsVolatile);
}

/// Copy an r-value into memory as part of storing to an atomic type.
/// This needs to create a bit-pattern suitable for atomic operations.
void AtomicInfo::emitCopyIntoMemory(RValue rvalue) const {
  assert(LVal.isSimple());
  // If we have an r-value, the rvalue should be of the atomic type,
  // which means that the caller is responsible for having zeroed
  // any padding.  Just do an aggregate copy of that type.
  if (rvalue.isAggregate()) {
    CGF.EmitAggregateCopy(getAtomicAddress(),
                          rvalue.getAggregateAddr(),
                          getAtomicType(),
                          (rvalue.isVolatileQualified()
                           || LVal.isVolatileQualified()),
                          LVal.getAlignment());
    return;
  }

  // Okay, otherwise we're copying stuff.

  // Zero out the buffer if necessary.
  emitMemSetZeroIfNecessary();

  // Drill past the padding if present.
  LValue TempLVal = projectValue();

  // Okay, store the rvalue in.
  if (rvalue.isScalar()) {
    CGF.EmitStoreOfScalar(rvalue.getScalarVal(), TempLVal, /*init*/ true);
  } else {
    CGF.EmitStoreOfComplex(rvalue.getComplexVal(), TempLVal, /*init*/ true);
  }
}

/// Materialize an r-value into memory for the purposes of storing it
/// to an atomic type.
llvm::Value *AtomicInfo::materializeRValue(RValue rvalue) const {
  // Aggregate r-values are already in memory, and EmitAtomicStore
  // requires them to be values of the atomic type.
  if (rvalue.isAggregate())
    return rvalue.getAggregateAddr();

  // Otherwise, make a temporary and materialize into it.
  LValue TempLV = CGF.MakeAddrLValue(CreateTempAlloca(), getAtomicType(),
                                     getAtomicAlignment());
  AtomicInfo Atomics(CGF, TempLV);
  Atomics.emitCopyIntoMemory(rvalue);
  return TempLV.getAddress();
}

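/// Pack an r-value into the integer form the atomic instructions expect.
/// Scalars of a suitable shape are converted in registers (ptrtoint /
/// bitcast); anything else is spilled with materializeRValue and reloaded
/// through the padded atomic integer type.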
llvm::Value *AtomicInfo::convertRValueToInt(RValue RVal) const {
  // If we've got a scalar value of the right size, try to avoid going
  // through memory.
  if (RVal.isScalar() && (!hasPadding() || !LVal.isSimple())) {
    llvm::Value *Value = RVal.getScalarVal();
    if (isa<llvm::IntegerType>(Value->getType()))
      return CGF.EmitToMemory(Value, ValueTy);
    else {
      llvm::IntegerType *InputIntTy = llvm::IntegerType::get(
          CGF.getLLVMContext(),
          LVal.isSimple() ? getValueSizeInBits() : getAtomicSizeInBits());
      if (isa<llvm::PointerType>(Value->getType()))
        return CGF.Builder.CreatePtrToInt(Value, InputIntTy);
      else if (llvm::BitCastInst::isBitCastable(Value->getType(), InputIntTy))
        return CGF.Builder.CreateBitCast(Value, InputIntTy);
    }
  }
  // Otherwise, we need to go through memory.
  // Put the r-value in memory.
  llvm::Value *Addr = materializeRValue(RVal);

  // Cast the temporary to the atomic int type and pull a value out.
  Addr = emitCastToAtomicIntPointer(Addr);
  return CGF.Builder.CreateAlignedLoad(Addr,
                                       getAtomicAlignment().getQuantity());
}

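// Shape of the instruction built below (illustrative IR; the exact textual
// form depends on the LLVM version):
//   %pair = cmpxchg [weak] [volatile] iN* %obj, iN %expected, iN %desired
//                   <success-order> <failure-order>
//   %old  = extractvalue { iN, i1 } %pair, 0   ; value previously held
//   %ok   = extractvalue { iN, i1 } %pair, 1   ; did the exchange happen?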
std::pair<llvm::Value *, llvm::Value *> AtomicInfo::EmitAtomicCompareExchangeOp(
    llvm::Value *ExpectedVal, llvm::Value *DesiredVal,
    llvm::AtomicOrdering Success, llvm::AtomicOrdering Failure, bool IsWeak) {
  // Do the atomic compare-and-exchange.
  auto *Addr = emitCastToAtomicIntPointer(getAtomicAddress());
  auto *Inst = CGF.Builder.CreateAtomicCmpXchg(Addr, ExpectedVal, DesiredVal,
                                               Success, Failure);
  // Other decoration.
  Inst->setVolatile(LVal.isVolatileQualified());
  Inst->setWeak(IsWeak);

  // Okay, turn that back into the original value type.
  auto *PreviousVal = CGF.Builder.CreateExtractValue(Inst, /*Idxs=*/0);
  auto *SuccessFailureVal = CGF.Builder.CreateExtractValue(Inst, /*Idxs=*/1);
  return std::make_pair(PreviousVal, SuccessFailureVal);
}

llvm::Value *
AtomicInfo::EmitAtomicCompareExchangeLibcall(llvm::Value *ExpectedAddr,
                                             llvm::Value *DesiredAddr,
                                             llvm::AtomicOrdering Success,
                                             llvm::AtomicOrdering Failure) {
  // bool __atomic_compare_exchange(size_t size, void *obj, void *expected,
  //                                void *desired, int success, int failure);
  CallArgList Args;
  Args.add(RValue::get(getAtomicSizeValue()), CGF.getContext().getSizeType());
  Args.add(RValue::get(CGF.EmitCastToVoidPtr(getAtomicAddress())),
           CGF.getContext().VoidPtrTy);
  Args.add(RValue::get(CGF.EmitCastToVoidPtr(ExpectedAddr)),
           CGF.getContext().VoidPtrTy);
  Args.add(RValue::get(CGF.EmitCastToVoidPtr(DesiredAddr)),
           CGF.getContext().VoidPtrTy);
  Args.add(RValue::get(llvm::ConstantInt::get(
               CGF.IntTy, translateAtomicOrdering(Success))),
           CGF.getContext().IntTy);
  Args.add(RValue::get(llvm::ConstantInt::get(
               CGF.IntTy, translateAtomicOrdering(Failure))),
           CGF.getContext().IntTy);
  auto SuccessFailureRVal = emitAtomicLibcall(CGF, "__atomic_compare_exchange",
                                              CGF.getContext().BoolTy, Args);

  return SuccessFailureRVal.getScalarVal();
}

std::pair<RValue, llvm::Value *> AtomicInfo::EmitAtomicCompareExchange(
    RValue Expected, RValue Desired, llvm::AtomicOrdering Success,
    llvm::AtomicOrdering Failure, bool IsWeak) {
  if (Failure >= Success)
    // Don't assert on undefined behavior.
    Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(Success);

  // Check whether we should use a library call.
  if (shouldUseLibcall()) {
    // Produce the expected and desired values in memory.
    auto *ExpectedAddr = materializeRValue(Expected);
    auto *DesiredAddr = materializeRValue(Desired);
    auto *Res = EmitAtomicCompareExchangeLibcall(ExpectedAddr, DesiredAddr,
                                                 Success, Failure);
    return std::make_pair(
        convertTempToRValue(ExpectedAddr, AggValueSlot::ignored(),
                            SourceLocation(), /*AsValue=*/false),
        Res);
  }

  // If we've got a scalar value of the right size, try to avoid going
  // through memory.
  auto *ExpectedVal = convertRValueToInt(Expected);
  auto *DesiredVal = convertRValueToInt(Desired);
  auto Res = EmitAtomicCompareExchangeOp(ExpectedVal, DesiredVal, Success,
                                         Failure, IsWeak);
  return std::make_pair(
      ConvertIntToValueOrAtomic(Res.first, AggValueSlot::ignored(),
                                SourceLocation(), /*AsValue=*/false),
      Res.second);
}

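/// Compute the new ("desired") value for an atomic update.
///
/// Rebuilds the original bit-field / vector-element l-value shape on top of
/// the temporaries, so that UpdateOp sees the old value at the source-level
/// type and its result is merged back into exactly the selected bits.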
static void
EmitAtomicUpdateValue(CodeGenFunction &CGF, AtomicInfo &Atomics, RValue OldRVal,
                      const llvm::function_ref<RValue(RValue)> &UpdateOp,
                      llvm::Value *DesiredAddr) {
  llvm::Value *Ptr = nullptr;
  LValue UpdateLVal;
  RValue UpRVal;
  LValue AtomicLVal = Atomics.getAtomicLValue();
  LValue DesiredLVal;
  if (AtomicLVal.isSimple()) {
    UpRVal = OldRVal;
    DesiredLVal =
        LValue::MakeAddr(DesiredAddr, AtomicLVal.getType(),
                         AtomicLVal.getAlignment(), CGF.CGM.getContext());
  } else {
    // Build new lvalues for the temp addresses.
    Ptr = Atomics.materializeRValue(OldRVal);
    if (AtomicLVal.isBitField()) {
      UpdateLVal =
          LValue::MakeBitfield(Ptr, AtomicLVal.getBitFieldInfo(),
                               AtomicLVal.getType(), AtomicLVal.getAlignment());
      DesiredLVal =
          LValue::MakeBitfield(DesiredAddr, AtomicLVal.getBitFieldInfo(),
                               AtomicLVal.getType(), AtomicLVal.getAlignment());
    } else if (AtomicLVal.isVectorElt()) {
      UpdateLVal = LValue::MakeVectorElt(Ptr, AtomicLVal.getVectorIdx(),
                                         AtomicLVal.getType(),
                                         AtomicLVal.getAlignment());
      DesiredLVal = LValue::MakeVectorElt(
          DesiredAddr, AtomicLVal.getVectorIdx(), AtomicLVal.getType(),
          AtomicLVal.getAlignment());
    } else {
      assert(AtomicLVal.isExtVectorElt());
      UpdateLVal = LValue::MakeExtVectorElt(Ptr, AtomicLVal.getExtVectorElts(),
                                            AtomicLVal.getType(),
                                            AtomicLVal.getAlignment());
      DesiredLVal = LValue::MakeExtVectorElt(
          DesiredAddr, AtomicLVal.getExtVectorElts(), AtomicLVal.getType(),
          AtomicLVal.getAlignment());
    }
    UpdateLVal.setTBAAInfo(AtomicLVal.getTBAAInfo());
    DesiredLVal.setTBAAInfo(AtomicLVal.getTBAAInfo());
    UpRVal = CGF.EmitLoadOfLValue(UpdateLVal, SourceLocation());
  }
  // Store the new value into the corresponding memory area.
  RValue NewRVal = UpdateOp(UpRVal);
  if (NewRVal.isScalar()) {
    CGF.EmitStoreThroughLValue(NewRVal, DesiredLVal);
  } else {
    assert(NewRVal.isComplex());
    CGF.EmitStoreOfComplex(NewRVal.getComplexVal(), DesiredLVal,
                           /*isInit=*/false);
  }
}

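// Both update emitters below expand to the canonical compare-exchange loop.
// The libcall flavor looks roughly like this (a pseudo-C sketch, not the
// literal output):
//
//   __atomic_load(size, obj, &expected, AO);
// atomic_cont:
//   desired = expected;                      // only if padding/bit-fields
//   <apply UpdateOp and store the result into desired>
//   if (!__atomic_compare_exchange(size, obj, &expected, &desired, AO,
//                                  failure-order))
//     goto atomic_cont;  // expected was refreshed with the current value
// atomic_exit: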
void AtomicInfo::EmitAtomicUpdateLibcall(
    llvm::AtomicOrdering AO, const llvm::function_ref<RValue(RValue)> &UpdateOp,
    bool IsVolatile) {
  auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);

  llvm::Value *ExpectedAddr = CreateTempAlloca();

  EmitAtomicLoadLibcall(ExpectedAddr, AO, IsVolatile);
  auto *ContBB = CGF.createBasicBlock("atomic_cont");
  auto *ExitBB = CGF.createBasicBlock("atomic_exit");
  CGF.EmitBlock(ContBB);
  auto *DesiredAddr = CreateTempAlloca();
  if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) ||
      requiresMemSetZero(
          getAtomicAddress()->getType()->getPointerElementType())) {
    // Start from the old value so the untouched bits keep a defined pattern.
    auto *OldVal = CGF.Builder.CreateAlignedLoad(
        ExpectedAddr, getAtomicAlignment().getQuantity());
    CGF.Builder.CreateAlignedStore(OldVal, DesiredAddr,
                                   getAtomicAlignment().getQuantity());
  }
  auto OldRVal = convertTempToRValue(ExpectedAddr, AggValueSlot::ignored(),
                                     SourceLocation(), /*AsValue=*/false);
  EmitAtomicUpdateValue(CGF, *this, OldRVal, UpdateOp, DesiredAddr);
  auto *Res =
      EmitAtomicCompareExchangeLibcall(ExpectedAddr, DesiredAddr, AO, Failure);
  CGF.Builder.CreateCondBr(Res, ExitBB, ContBB);
  CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
}

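// Native variant of the same loop: the current value circulates through a
// PHI node instead of through memory (sketch, not literal output):
//
//   %old = load atomic iN* %obj AO
// atomic_cont:
//   %phi = phi iN [ %old, %entry ], [ %prev, %atomic_cont ]
//   <apply UpdateOp on a temporary, reload the result as %desired>
//   %pair = cmpxchg iN* %obj, iN %phi, iN %desired AO Failure
//   %prev = extractvalue %pair, 0 ; %ok = extractvalue %pair, 1
//   br i1 %ok, label %atomic_exit, label %atomic_cont
// atomic_exit: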
void AtomicInfo::EmitAtomicUpdateOp(
    llvm::AtomicOrdering AO, const llvm::function_ref<RValue(RValue)> &UpdateOp,
    bool IsVolatile) {
  auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);

  // Do the atomic load.
  auto *OldVal = EmitAtomicLoadOp(AO, IsVolatile);
  // Then perform the compare-and-swap loop.
  auto *ContBB = CGF.createBasicBlock("atomic_cont");
  auto *ExitBB = CGF.createBasicBlock("atomic_exit");
  auto *CurBB = CGF.Builder.GetInsertBlock();
  CGF.EmitBlock(ContBB);
  llvm::PHINode *PHI = CGF.Builder.CreatePHI(OldVal->getType(),
                                             /*NumReservedValues=*/2);
  PHI->addIncoming(OldVal, CurBB);
  auto *NewAtomicAddr = CreateTempAlloca();
  auto *NewAtomicIntAddr = emitCastToAtomicIntPointer(NewAtomicAddr);
  if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) ||
      requiresMemSetZero(
          getAtomicAddress()->getType()->getPointerElementType())) {
    CGF.Builder.CreateAlignedStore(PHI, NewAtomicIntAddr,
                                   getAtomicAlignment().getQuantity());
  }
  auto OldRVal = ConvertIntToValueOrAtomic(PHI, AggValueSlot::ignored(),
                                           SourceLocation(), /*AsValue=*/false);
  EmitAtomicUpdateValue(CGF, *this, OldRVal, UpdateOp, NewAtomicAddr);
  auto *DesiredVal = CGF.Builder.CreateAlignedLoad(
      NewAtomicIntAddr, getAtomicAlignment().getQuantity());
  // Try to write the new value using a cmpxchg operation.
  auto Res = EmitAtomicCompareExchangeOp(PHI, DesiredVal, AO, Failure);
  PHI->addIncoming(Res.first, CGF.Builder.GetInsertBlock());
  CGF.Builder.CreateCondBr(Res.second, ExitBB, ContBB);
  CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
}

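/// As above, but for a plain replacement value instead of an update
/// function. Only the non-simple shapes (bit-field, vector element) reach
/// this path, since a simple atomic store has no surrounding bits to
/// preserve.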
static void EmitAtomicUpdateValue(CodeGenFunction &CGF, AtomicInfo &Atomics,
                                  RValue UpdateRVal, llvm::Value *DesiredAddr) {
  LValue AtomicLVal = Atomics.getAtomicLValue();
  LValue DesiredLVal;
  // Build a new lvalue for the temp address.
  if (AtomicLVal.isBitField()) {
    DesiredLVal =
        LValue::MakeBitfield(DesiredAddr, AtomicLVal.getBitFieldInfo(),
                             AtomicLVal.getType(), AtomicLVal.getAlignment());
  } else if (AtomicLVal.isVectorElt()) {
    DesiredLVal =
        LValue::MakeVectorElt(DesiredAddr, AtomicLVal.getVectorIdx(),
                              AtomicLVal.getType(), AtomicLVal.getAlignment());
  } else {
    assert(AtomicLVal.isExtVectorElt());
    DesiredLVal = LValue::MakeExtVectorElt(
        DesiredAddr, AtomicLVal.getExtVectorElts(), AtomicLVal.getType(),
        AtomicLVal.getAlignment());
  }
  DesiredLVal.setTBAAInfo(AtomicLVal.getTBAAInfo());
  // Store the new value into the corresponding memory area.
  assert(UpdateRVal.isScalar());
  CGF.EmitStoreThroughLValue(UpdateRVal, DesiredLVal);
}

void AtomicInfo::EmitAtomicUpdateLibcall(llvm::AtomicOrdering AO,
                                         RValue UpdateRVal, bool IsVolatile) {
  auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);

  llvm::Value *ExpectedAddr = CreateTempAlloca();

  EmitAtomicLoadLibcall(ExpectedAddr, AO, IsVolatile);
  auto *ContBB = CGF.createBasicBlock("atomic_cont");
  auto *ExitBB = CGF.createBasicBlock("atomic_exit");
  CGF.EmitBlock(ContBB);
  auto *DesiredAddr = CreateTempAlloca();
  if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) ||
      requiresMemSetZero(
          getAtomicAddress()->getType()->getPointerElementType())) {
    auto *OldVal = CGF.Builder.CreateAlignedLoad(
        ExpectedAddr, getAtomicAlignment().getQuantity());
    CGF.Builder.CreateAlignedStore(OldVal, DesiredAddr,
                                   getAtomicAlignment().getQuantity());
  }
  EmitAtomicUpdateValue(CGF, *this, UpdateRVal, DesiredAddr);
  auto *Res =
      EmitAtomicCompareExchangeLibcall(ExpectedAddr, DesiredAddr, AO, Failure);
  CGF.Builder.CreateCondBr(Res, ExitBB, ContBB);
  CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
}

void AtomicInfo::EmitAtomicUpdateOp(llvm::AtomicOrdering AO, RValue UpdateRVal,
                                    bool IsVolatile) {
  auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);

  // Do the atomic load.
  auto *OldVal = EmitAtomicLoadOp(AO, IsVolatile);
  // Then perform the compare-and-swap loop.
  auto *ContBB = CGF.createBasicBlock("atomic_cont");
  auto *ExitBB = CGF.createBasicBlock("atomic_exit");
  auto *CurBB = CGF.Builder.GetInsertBlock();
  CGF.EmitBlock(ContBB);
  llvm::PHINode *PHI = CGF.Builder.CreatePHI(OldVal->getType(),
                                             /*NumReservedValues=*/2);
  PHI->addIncoming(OldVal, CurBB);
  auto *NewAtomicAddr = CreateTempAlloca();
  auto *NewAtomicIntAddr = emitCastToAtomicIntPointer(NewAtomicAddr);
  if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) ||
      requiresMemSetZero(
          getAtomicAddress()->getType()->getPointerElementType())) {
    CGF.Builder.CreateAlignedStore(PHI, NewAtomicIntAddr,
                                   getAtomicAlignment().getQuantity());
  }
  EmitAtomicUpdateValue(CGF, *this, UpdateRVal, NewAtomicAddr);
  auto *DesiredVal = CGF.Builder.CreateAlignedLoad(
      NewAtomicIntAddr, getAtomicAlignment().getQuantity());
  // Try to write the new value using a cmpxchg operation.
  auto Res = EmitAtomicCompareExchangeOp(PHI, DesiredVal, AO, Failure);
  PHI->addIncoming(Res.first, CGF.Builder.GetInsertBlock());
  CGF.Builder.CreateCondBr(Res.second, ExitBB, ContBB);
  CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
}

void AtomicInfo::EmitAtomicUpdate(
    llvm::AtomicOrdering AO, const llvm::function_ref<RValue(RValue)> &UpdateOp,
    bool IsVolatile) {
  if (shouldUseLibcall()) {
    EmitAtomicUpdateLibcall(AO, UpdateOp, IsVolatile);
  } else {
    EmitAtomicUpdateOp(AO, UpdateOp, IsVolatile);
  }
}

void AtomicInfo::EmitAtomicUpdate(llvm::AtomicOrdering AO, RValue UpdateRVal,
                                  bool IsVolatile) {
  if (shouldUseLibcall()) {
    EmitAtomicUpdateLibcall(AO, UpdateRVal, IsVolatile);
  } else {
    EmitAtomicUpdateOp(AO, UpdateRVal, IsVolatile);
  }
}

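/// Choose the default ordering for an atomic store: genuine atomic types
/// get sequentially consistent stores, while the /volatile:ms path uses
/// release stores (pairing with the acquire loads chosen above).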
void CodeGenFunction::EmitAtomicStore(RValue rvalue, LValue lvalue,
                                      bool isInit) {
  bool IsVolatile = lvalue.isVolatileQualified();
  llvm::AtomicOrdering AO;
  if (lvalue.getType()->isAtomicType()) {
    AO = llvm::SequentiallyConsistent;
  } else {
    AO = llvm::Release;
    IsVolatile = true;
  }
  return EmitAtomicStore(rvalue, lvalue, AO, IsVolatile, isInit);
}

/// Emit a store to an l-value of atomic type.
///
/// Note that the r-value is expected to be an r-value *of the atomic
/// type*; this means that for aggregate r-values, it should include
/// storage for any padding that was necessary.
void CodeGenFunction::EmitAtomicStore(RValue rvalue, LValue dest,
                                      llvm::AtomicOrdering AO, bool IsVolatile,
                                      bool isInit) {
  // If this is an aggregate r-value, it should agree in type except
  // maybe for address-space qualification.
  assert(!rvalue.isAggregate() ||
         rvalue.getAggregateAddr()->getType()->getPointerElementType()
           == dest.getAddress()->getType()->getPointerElementType());

  AtomicInfo atomics(*this, dest);
  LValue LVal = atomics.getAtomicLValue();

  // If this is an initialization, just put the value there normally.
  if (LVal.isSimple()) {
    if (isInit) {
      atomics.emitCopyIntoMemory(rvalue);
      return;
    }

    // Check whether we should use a library call.
    if (atomics.shouldUseLibcall()) {
      // Produce a source address.
      llvm::Value *srcAddr = atomics.materializeRValue(rvalue);

      // void __atomic_store(size_t size, void *mem, void *val, int order)
      CallArgList args;
      args.add(RValue::get(atomics.getAtomicSizeValue()),
               getContext().getSizeType());
      args.add(RValue::get(EmitCastToVoidPtr(atomics.getAtomicAddress())),
               getContext().VoidPtrTy);
      args.add(RValue::get(EmitCastToVoidPtr(srcAddr)), getContext().VoidPtrTy);
      args.add(RValue::get(llvm::ConstantInt::get(
                   IntTy, AtomicInfo::translateAtomicOrdering(AO))),
               getContext().IntTy);
      emitAtomicLibcall(*this, "__atomic_store", getContext().VoidTy, args);
      return;
    }

    // Okay, we're doing this natively.
    llvm::Value *intValue = atomics.convertRValueToInt(rvalue);

    // Do the atomic store.
    llvm::Value *addr =
        atomics.emitCastToAtomicIntPointer(atomics.getAtomicAddress());
    intValue = Builder.CreateIntCast(
        intValue, addr->getType()->getPointerElementType(), /*isSigned=*/false);
    llvm::StoreInst *store = Builder.CreateStore(intValue, addr);

    // Initializations don't need to be atomic.
    if (!isInit)
      store->setAtomic(AO);

    // Other decoration.
    store->setAlignment(dest.getAlignment().getQuantity());
    if (IsVolatile)
      store->setVolatile(true);
    if (dest.getTBAAInfo())
      CGM.DecorateInstruction(store, dest.getTBAAInfo());
    return;
  }

  // Non-simple lvalues (bit-fields, vector elements) go through the
  // compare-exchange update loop instead.
  atomics.EmitAtomicUpdate(AO, rvalue, IsVolatile);
}

/// Emit a compare-and-exchange op for an atomic type.
///
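/// Returns a pair of the value previously held by the object and an i1
/// flag that is true if the exchange succeeded.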
std::pair<RValue, llvm::Value *> CodeGenFunction::EmitAtomicCompareExchange(
    LValue Obj, RValue Expected, RValue Desired, SourceLocation Loc,
    llvm::AtomicOrdering Success, llvm::AtomicOrdering Failure, bool IsWeak,
    AggValueSlot Slot) {
  // If this is an aggregate r-value, it should agree in type except
  // maybe for address-space qualification.
  assert(!Expected.isAggregate() ||
         Expected.getAggregateAddr()->getType()->getPointerElementType() ==
             Obj.getAddress()->getType()->getPointerElementType());
  assert(!Desired.isAggregate() ||
         Desired.getAggregateAddr()->getType()->getPointerElementType() ==
             Obj.getAddress()->getType()->getPointerElementType());
  AtomicInfo Atomics(*this, Obj);

  return Atomics.EmitAtomicCompareExchange(Expected, Desired, Success, Failure,
                                           IsWeak);
}

void CodeGenFunction::EmitAtomicUpdate(
    LValue LVal, llvm::AtomicOrdering AO,
    const llvm::function_ref<RValue(RValue)> &UpdateOp, bool IsVolatile) {
  AtomicInfo Atomics(*this, LVal);
  Atomics.EmitAtomicUpdate(AO, UpdateOp, IsVolatile);
}

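/// Emit the initialization of an atomic object. The object is not yet
/// visible to other threads, so no atomicity is required: the value is
/// simply copied (or constructed) into place, zeroing padding as needed.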
void CodeGenFunction::EmitAtomicInit(Expr *init, LValue dest) {
  AtomicInfo atomics(*this, dest);

  switch (atomics.getEvaluationKind()) {
  case TEK_Scalar: {
    llvm::Value *value = EmitScalarExpr(init);
    atomics.emitCopyIntoMemory(RValue::get(value));
    return;
  }

  case TEK_Complex: {
    ComplexPairTy value = EmitComplexExpr(init);
    atomics.emitCopyIntoMemory(RValue::getComplex(value));
    return;
  }

  case TEK_Aggregate: {
    // Fix up the destination if the initializer isn't an expression
    // of atomic type.
    bool Zeroed = false;
    if (!init->getType()->isAtomicType()) {
      Zeroed = atomics.emitMemSetZeroIfNecessary();
      dest = atomics.projectValue();
    }

    // Evaluate the expression directly into the destination.
    AggValueSlot slot = AggValueSlot::forLValue(dest,
                                                AggValueSlot::IsNotDestructed,
                                                AggValueSlot::DoesNotNeedGCBarriers,
                                                AggValueSlot::IsNotAliased,
                                                Zeroed ? AggValueSlot::IsZeroed :
                                                         AggValueSlot::IsNotZeroed);

    EmitAggExpr(init, slot);
    return;
  }
  }
  llvm_unreachable("bad evaluation kind");
}