//===--- CGAtomic.cpp - Emit LLVM IR for atomic operations ----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the code for emitting atomic operations.
//
//===----------------------------------------------------------------------===//

#include "CodeGenFunction.h"
#include "CGCall.h"
#include "CGRecordLayout.h"
#include "CodeGenModule.h"
#include "clang/AST/ASTContext.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Operator.h"

using namespace clang;
using namespace CodeGen;

namespace {
  class AtomicInfo {
    CodeGenFunction &CGF;
    QualType AtomicTy;
    QualType ValueTy;
    uint64_t AtomicSizeInBits;
    uint64_t ValueSizeInBits;
    CharUnits AtomicAlign;
    CharUnits ValueAlign;
    CharUnits LValueAlign;
    TypeEvaluationKind EvaluationKind;
    bool UseLibcall;
    LValue LVal;
    CGBitFieldInfo BFI;
  public:
    AtomicInfo(CodeGenFunction &CGF, LValue &lvalue)
        : CGF(CGF), AtomicSizeInBits(0), ValueSizeInBits(0),
          EvaluationKind(TEK_Scalar), UseLibcall(true) {
      assert(!lvalue.isGlobalReg());
      ASTContext &C = CGF.getContext();
      if (lvalue.isSimple()) {
        AtomicTy = lvalue.getType();
        if (auto *ATy = AtomicTy->getAs<AtomicType>())
          ValueTy = ATy->getValueType();
        else
          ValueTy = AtomicTy;
        EvaluationKind = CGF.getEvaluationKind(ValueTy);

        uint64_t ValueAlignInBits;
        uint64_t AtomicAlignInBits;
        TypeInfo ValueTI = C.getTypeInfo(ValueTy);
        ValueSizeInBits = ValueTI.Width;
        ValueAlignInBits = ValueTI.Align;

        TypeInfo AtomicTI = C.getTypeInfo(AtomicTy);
        AtomicSizeInBits = AtomicTI.Width;
        AtomicAlignInBits = AtomicTI.Align;

        assert(ValueSizeInBits <= AtomicSizeInBits);
        assert(ValueAlignInBits <= AtomicAlignInBits);

        AtomicAlign = C.toCharUnitsFromBits(AtomicAlignInBits);
        ValueAlign = C.toCharUnitsFromBits(ValueAlignInBits);
        if (lvalue.getAlignment().isZero())
          lvalue.setAlignment(AtomicAlign);

        LVal = lvalue;
      } else if (lvalue.isBitField()) {
        ValueTy = lvalue.getType();
        ValueSizeInBits = C.getTypeSize(ValueTy);
        auto &OrigBFI = lvalue.getBitFieldInfo();
        auto Offset = OrigBFI.Offset % C.toBits(lvalue.getAlignment());
        AtomicSizeInBits = C.toBits(
            C.toCharUnitsFromBits(Offset + OrigBFI.Size + C.getCharWidth() - 1)
                .alignTo(lvalue.getAlignment()));
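        // Illustrative example (hypothetical numbers, not from the original
        // source): for a 5-bit field at bit offset 35 within a 4-byte-aligned
        // lvalue, Offset is 35 % 32 == 3, and the 3 + 5 bit span rounds up to
        // one 4-byte storage unit, so AtomicSizeInBits becomes 32 and the
        // atomic operation covers bytes 4..7 of the original storage.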
        auto VoidPtrAddr = CGF.EmitCastToVoidPtr(lvalue.getBitFieldPointer());
        auto OffsetInChars =
            (C.toCharUnitsFromBits(OrigBFI.Offset) / lvalue.getAlignment()) *
            lvalue.getAlignment();
        VoidPtrAddr = CGF.Builder.CreateConstGEP1_64(
            VoidPtrAddr, OffsetInChars.getQuantity());
        auto Addr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
            VoidPtrAddr,
            CGF.Builder.getIntNTy(AtomicSizeInBits)->getPointerTo(),
            "atomic_bitfield_base");
        BFI = OrigBFI;
        BFI.Offset = Offset;
        BFI.StorageSize = AtomicSizeInBits;
        BFI.StorageOffset += OffsetInChars;
        LVal = LValue::MakeBitfield(Address(Addr, lvalue.getAlignment()),
                                    BFI, lvalue.getType(),
                                    lvalue.getAlignmentSource());
        LVal.setTBAAInfo(lvalue.getTBAAInfo());
        AtomicTy = C.getIntTypeForBitwidth(AtomicSizeInBits, OrigBFI.IsSigned);
        if (AtomicTy.isNull()) {
          llvm::APInt Size(
              /*numBits=*/32,
              C.toCharUnitsFromBits(AtomicSizeInBits).getQuantity());
          AtomicTy = C.getConstantArrayType(C.CharTy, Size, ArrayType::Normal,
                                            /*IndexTypeQuals=*/0);
        }
        AtomicAlign = ValueAlign = lvalue.getAlignment();
      } else if (lvalue.isVectorElt()) {
        ValueTy = lvalue.getType()->getAs<VectorType>()->getElementType();
        ValueSizeInBits = C.getTypeSize(ValueTy);
        AtomicTy = lvalue.getType();
        AtomicSizeInBits = C.getTypeSize(AtomicTy);
        AtomicAlign = ValueAlign = lvalue.getAlignment();
        LVal = lvalue;
      } else {
        assert(lvalue.isExtVectorElt());
        ValueTy = lvalue.getType();
        ValueSizeInBits = C.getTypeSize(ValueTy);
        AtomicTy = ValueTy = CGF.getContext().getExtVectorType(
            lvalue.getType(), lvalue.getExtVectorAddress()
                                  .getElementType()->getVectorNumElements());
        AtomicSizeInBits = C.getTypeSize(AtomicTy);
        AtomicAlign = ValueAlign = lvalue.getAlignment();
        LVal = lvalue;
      }
      UseLibcall = !C.getTargetInfo().hasBuiltinAtomic(
          AtomicSizeInBits, C.toBits(lvalue.getAlignment()));
    }

    QualType getAtomicType() const { return AtomicTy; }
    QualType getValueType() const { return ValueTy; }
    CharUnits getAtomicAlignment() const { return AtomicAlign; }
    CharUnits getValueAlignment() const { return ValueAlign; }
    uint64_t getAtomicSizeInBits() const { return AtomicSizeInBits; }
    uint64_t getValueSizeInBits() const { return ValueSizeInBits; }
    TypeEvaluationKind getEvaluationKind() const { return EvaluationKind; }
    bool shouldUseLibcall() const { return UseLibcall; }
    const LValue &getAtomicLValue() const { return LVal; }
    llvm::Value *getAtomicPointer() const {
      if (LVal.isSimple())
        return LVal.getPointer();
      else if (LVal.isBitField())
        return LVal.getBitFieldPointer();
      else if (LVal.isVectorElt())
        return LVal.getVectorPointer();
      assert(LVal.isExtVectorElt());
      return LVal.getExtVectorPointer();
    }
    Address getAtomicAddress() const {
      return Address(getAtomicPointer(), getAtomicAlignment());
    }

    Address getAtomicAddressAsAtomicIntPointer() const {
      return emitCastToAtomicIntPointer(getAtomicAddress());
    }

    /// Is the atomic size larger than the underlying value type?
    ///
    /// Note that the absence of padding does not mean that atomic
    /// objects are completely interchangeable with non-atomic
    /// objects: we might have promoted the alignment of a type
    /// without making it bigger.
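    ///
    /// For example (illustrative, target-dependent): a 3-byte struct whose
    /// atomic representation is widened to 4 bytes has ValueSizeInBits == 24
    /// but AtomicSizeInBits == 32, so the trailing byte is padding.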
    bool hasPadding() const {
      return (ValueSizeInBits != AtomicSizeInBits);
    }

    bool emitMemSetZeroIfNecessary() const;

    llvm::Value *getAtomicSizeValue() const {
      CharUnits size = CGF.getContext().toCharUnitsFromBits(AtomicSizeInBits);
      return CGF.CGM.getSize(size);
    }

    /// Cast the given pointer to an integer pointer suitable for atomic
    /// operations.
    Address emitCastToAtomicIntPointer(Address Addr) const;

    /// If Addr is compatible with the iN that will be used for an atomic
    /// operation, bitcast it. Otherwise, create a temporary that is suitable
    /// and copy the value across.
    Address convertToAtomicIntPointer(Address Addr) const;

    /// Turn an atomic-layout object into an r-value.
    RValue convertAtomicTempToRValue(Address addr, AggValueSlot resultSlot,
                                     SourceLocation loc, bool AsValue) const;

    /// \brief Converts an rvalue to an integer value.
    llvm::Value *convertRValueToInt(RValue RVal) const;

    RValue ConvertIntToValueOrAtomic(llvm::Value *IntVal,
                                     AggValueSlot ResultSlot,
                                     SourceLocation Loc, bool AsValue) const;

    /// Copy an atomic r-value into atomic-layout memory.
    void emitCopyIntoMemory(RValue rvalue) const;

    /// Project an l-value down to the value field.
    LValue projectValue() const {
      assert(LVal.isSimple());
      Address addr = getAtomicAddress();
      if (hasPadding())
        addr = CGF.Builder.CreateStructGEP(addr, 0, CharUnits());

      return LValue::MakeAddr(addr, getValueType(), CGF.getContext(),
                              LVal.getAlignmentSource(), LVal.getTBAAInfo());
    }

    /// \brief Emits atomic load.
    /// \returns Loaded value.
    RValue EmitAtomicLoad(AggValueSlot ResultSlot, SourceLocation Loc,
                          bool AsValue, llvm::AtomicOrdering AO,
                          bool IsVolatile);

    /// \brief Emits atomic compare-and-exchange sequence.
    /// \param Expected Expected value.
    /// \param Desired Desired value.
    /// \param Success Atomic ordering for the success case.
    /// \param Failure Atomic ordering for the failure case.
    /// \param IsWeak true if atomic operation is weak, false otherwise.
    /// \returns Pair of values: previous value from storage (value type) and
    ///          boolean flag (i1 type) with true if success and false otherwise.
    std::pair<RValue, llvm::Value *>
    EmitAtomicCompareExchange(RValue Expected, RValue Desired,
                              llvm::AtomicOrdering Success =
                                  llvm::AtomicOrdering::SequentiallyConsistent,
                              llvm::AtomicOrdering Failure =
                                  llvm::AtomicOrdering::SequentiallyConsistent,
                              bool IsWeak = false);

    /// \brief Emits atomic update.
    /// \param AO Atomic ordering.
    /// \param UpdateOp Update operation for the current lvalue.
    void EmitAtomicUpdate(llvm::AtomicOrdering AO,
                          const llvm::function_ref<RValue(RValue)> &UpdateOp,
                          bool IsVolatile);
    /// \brief Emits atomic update.
    /// \param AO Atomic ordering.
    void EmitAtomicUpdate(llvm::AtomicOrdering AO, RValue UpdateRVal,
                          bool IsVolatile);

    /// Materialize an atomic r-value in atomic-layout memory.
    Address materializeRValue(RValue rvalue) const;

    /// \brief Translates LLVM atomic ordering to GNU atomic ordering for
    /// libcalls.
    static AtomicExpr::AtomicOrderingKind
    translateAtomicOrdering(const llvm::AtomicOrdering AO);

    /// \brief Creates a temporary alloca for intermediate operations on an
    /// atomic value.
    Address CreateTempAlloca() const;
  private:
    bool requiresMemSetZero(llvm::Type *type) const;

    /// \brief Emits atomic load as a libcall.
    void EmitAtomicLoadLibcall(llvm::Value *AddrForLoaded,
                               llvm::AtomicOrdering AO, bool IsVolatile);
    /// \brief Emits atomic load as LLVM instruction.
    llvm::Value *EmitAtomicLoadOp(llvm::AtomicOrdering AO, bool IsVolatile);
    /// \brief Emits atomic compare-and-exchange op as a libcall.
    llvm::Value *EmitAtomicCompareExchangeLibcall(
        llvm::Value *ExpectedAddr, llvm::Value *DesiredAddr,
        llvm::AtomicOrdering Success =
            llvm::AtomicOrdering::SequentiallyConsistent,
        llvm::AtomicOrdering Failure =
            llvm::AtomicOrdering::SequentiallyConsistent);
    /// \brief Emits atomic compare-and-exchange op as LLVM instruction.
    std::pair<llvm::Value *, llvm::Value *> EmitAtomicCompareExchangeOp(
        llvm::Value *ExpectedVal, llvm::Value *DesiredVal,
        llvm::AtomicOrdering Success =
            llvm::AtomicOrdering::SequentiallyConsistent,
        llvm::AtomicOrdering Failure =
            llvm::AtomicOrdering::SequentiallyConsistent,
        bool IsWeak = false);
    /// \brief Emit atomic update as libcalls.
    void
    EmitAtomicUpdateLibcall(llvm::AtomicOrdering AO,
                            const llvm::function_ref<RValue(RValue)> &UpdateOp,
                            bool IsVolatile);
    /// \brief Emit atomic update as LLVM instructions.
    void EmitAtomicUpdateOp(llvm::AtomicOrdering AO,
                            const llvm::function_ref<RValue(RValue)> &UpdateOp,
                            bool IsVolatile);
    /// \brief Emit atomic update as libcalls.
    void EmitAtomicUpdateLibcall(llvm::AtomicOrdering AO, RValue UpdateRVal,
                                 bool IsVolatile);
    /// \brief Emit atomic update as LLVM instructions.
    void EmitAtomicUpdateOp(llvm::AtomicOrdering AO, RValue UpdateRVal,
                            bool IsVolatile);
  };
}

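// Note: the AO_ABI_* values returned below are the integer memory_order
// constants that the GNU atomic libcalls expect. LLVM has no consume
// ordering, so nothing maps to memory_order_consume, and the Unordered,
// NotAtomic and Monotonic orderings all translate to relaxed.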
AtomicExpr::AtomicOrderingKind
AtomicInfo::translateAtomicOrdering(const llvm::AtomicOrdering AO) {
  switch (AO) {
  case llvm::AtomicOrdering::Unordered:
  case llvm::AtomicOrdering::NotAtomic:
  case llvm::AtomicOrdering::Monotonic:
    return AtomicExpr::AO_ABI_memory_order_relaxed;
  case llvm::AtomicOrdering::Acquire:
    return AtomicExpr::AO_ABI_memory_order_acquire;
  case llvm::AtomicOrdering::Release:
    return AtomicExpr::AO_ABI_memory_order_release;
  case llvm::AtomicOrdering::AcquireRelease:
    return AtomicExpr::AO_ABI_memory_order_acq_rel;
  case llvm::AtomicOrdering::SequentiallyConsistent:
    return AtomicExpr::AO_ABI_memory_order_seq_cst;
  }
  llvm_unreachable("Unhandled AtomicOrdering");
}

Address AtomicInfo::CreateTempAlloca() const {
  Address TempAlloca = CGF.CreateMemTemp(
      (LVal.isBitField() && ValueSizeInBits > AtomicSizeInBits) ? ValueTy
                                                                : AtomicTy,
      getAtomicAlignment(),
      "atomic-temp");
  // Cast to pointer to value type for bitfields.
  if (LVal.isBitField())
    return CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
        TempAlloca, getAtomicAddress().getType());
  return TempAlloca;
}

static RValue emitAtomicLibcall(CodeGenFunction &CGF,
                                StringRef fnName,
                                QualType resultType,
                                CallArgList &args) {
  const CGFunctionInfo &fnInfo =
      CGF.CGM.getTypes().arrangeBuiltinFunctionCall(resultType, args);
  llvm::FunctionType *fnTy = CGF.CGM.getTypes().GetFunctionType(fnInfo);
  llvm::Constant *fn = CGF.CGM.CreateRuntimeFunction(fnTy, fnName);
  return CGF.EmitCall(fnInfo, fn, ReturnValueSlot(), args);
}

/// Does a store of the given IR type modify the full expected width?
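///
/// For example (illustrative): on x86, an x86_fp80 value has a store size of
/// 10 bytes even though a long double occupies 12 or 16 bytes in memory, so
/// a plain store would not rewrite the whole atomic width.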
static bool isFullSizeType(CodeGenModule &CGM, llvm::Type *type,
                           uint64_t expectedSize) {
  return (CGM.getDataLayout().getTypeStoreSize(type) * 8 == expectedSize);
}

/// Does the atomic type require memsetting to zero before initialization?
///
/// The IR type is provided as a way of making certain queries faster.
bool AtomicInfo::requiresMemSetZero(llvm::Type *type) const {
  // If the atomic type has size padding, we definitely need a memset.
  if (hasPadding()) return true;

  // Otherwise, do some simple heuristics to try to avoid it:
  switch (getEvaluationKind()) {
  // For scalars and complexes, check whether the store size of the
  // type uses the full size.
  case TEK_Scalar:
    return !isFullSizeType(CGF.CGM, type, AtomicSizeInBits);
  case TEK_Complex:
    return !isFullSizeType(CGF.CGM, type->getStructElementType(0),
                           AtomicSizeInBits / 2);

  // Padding in structs has an undefined bit pattern. User beware.
  case TEK_Aggregate:
    return false;
  }
  llvm_unreachable("bad evaluation kind");
}

bool AtomicInfo::emitMemSetZeroIfNecessary() const {
  assert(LVal.isSimple());
  llvm::Value *addr = LVal.getPointer();
  if (!requiresMemSetZero(addr->getType()->getPointerElementType()))
    return false;

  CGF.Builder.CreateMemSet(
      addr, llvm::ConstantInt::get(CGF.Int8Ty, 0),
      CGF.getContext().toCharUnitsFromBits(AtomicSizeInBits).getQuantity(),
      LVal.getAlignment().getQuantity());
  return true;
}

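// A sketch of the IR shape this helper emits (illustrative, assuming a 32-bit
// operand):
//   %pair = cmpxchg i32* %ptr, i32 %expected, i32 %desired succ_ord fail_ord
//   %old  = extractvalue { i32, i1 } %pair, 0
//   %cmp  = extractvalue { i32, i1 } %pair, 1
//   br i1 %cmp, label %cmpxchg.continue, label %cmpxchg.store_expected
// On failure, %old is stored back to the 'expected' slot before rejoining the
// continue block.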
static void emitAtomicCmpXchg(CodeGenFunction &CGF, AtomicExpr *E, bool IsWeak,
                              Address Dest, Address Ptr,
                              Address Val1, Address Val2,
                              uint64_t Size,
                              llvm::AtomicOrdering SuccessOrder,
                              llvm::AtomicOrdering FailureOrder) {
  // Both the weak flag and the failure ordering are encoded directly on the
  // cmpxchg instruction below.
  llvm::Value *Expected = CGF.Builder.CreateLoad(Val1);
  llvm::Value *Desired = CGF.Builder.CreateLoad(Val2);

  llvm::AtomicCmpXchgInst *Pair = CGF.Builder.CreateAtomicCmpXchg(
      Ptr.getPointer(), Expected, Desired, SuccessOrder, FailureOrder);
  Pair->setVolatile(E->isVolatile());
  Pair->setWeak(IsWeak);

  // Cmp holds the result of the compare-exchange operation: true on success,
  // false on failure.
  llvm::Value *Old = CGF.Builder.CreateExtractValue(Pair, 0);
  llvm::Value *Cmp = CGF.Builder.CreateExtractValue(Pair, 1);

  // This basic block is used to hold the store instruction if the operation
  // failed.
  llvm::BasicBlock *StoreExpectedBB =
      CGF.createBasicBlock("cmpxchg.store_expected", CGF.CurFn);

  // This basic block is the exit point of the operation; we should end up
  // here regardless of whether or not the operation succeeded.
  llvm::BasicBlock *ContinueBB =
      CGF.createBasicBlock("cmpxchg.continue", CGF.CurFn);

  // Update Expected if Expected isn't equal to Old, otherwise branch to the
  // exit point.
  CGF.Builder.CreateCondBr(Cmp, ContinueBB, StoreExpectedBB);

  CGF.Builder.SetInsertPoint(StoreExpectedBB);
  // Update the memory at Expected with Old's value.
  CGF.Builder.CreateStore(Old, Val1);
  // Finally, branch to the exit point.
  CGF.Builder.CreateBr(ContinueBB);

  CGF.Builder.SetInsertPoint(ContinueBB);
  // Update the memory at Dest with Cmp's value.
  CGF.EmitStoreOfScalar(Cmp, CGF.MakeAddrLValue(Dest, E->getType()));
}

/// Given an ordering required on success, emit all possible cmpxchg
/// instructions to cope with the provided (but possibly only dynamically known)
/// FailureOrder.
static void emitAtomicCmpXchgFailureSet(CodeGenFunction &CGF, AtomicExpr *E,
                                        bool IsWeak, Address Dest,
                                        Address Ptr, Address Val1,
                                        Address Val2,
                                        llvm::Value *FailureOrderVal,
                                        uint64_t Size,
                                        llvm::AtomicOrdering SuccessOrder) {
  llvm::AtomicOrdering FailureOrder;
  if (llvm::ConstantInt *FO = dyn_cast<llvm::ConstantInt>(FailureOrderVal)) {
    switch (FO->getSExtValue()) {
    default:
      FailureOrder = llvm::AtomicOrdering::Monotonic;
      break;
    case AtomicExpr::AO_ABI_memory_order_consume:
    case AtomicExpr::AO_ABI_memory_order_acquire:
      FailureOrder = llvm::AtomicOrdering::Acquire;
      break;
    case AtomicExpr::AO_ABI_memory_order_seq_cst:
      FailureOrder = llvm::AtomicOrdering::SequentiallyConsistent;
      break;
    }
    if (FailureOrder >= SuccessOrder) {
      // Don't assert on undefined behaviour.
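      // (E.g. a seq_cst failure ordering paired with an acq_rel success
      // ordering is invalid; it is clamped to acquire here.)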
      FailureOrder =
          llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(SuccessOrder);
    }
    emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, Size,
                      SuccessOrder, FailureOrder);
    return;
  }

  // Create all the relevant BB's
  llvm::BasicBlock *MonotonicBB = nullptr, *AcquireBB = nullptr,
                   *SeqCstBB = nullptr;
  MonotonicBB = CGF.createBasicBlock("monotonic_fail", CGF.CurFn);
  if (SuccessOrder != llvm::AtomicOrdering::Monotonic &&
      SuccessOrder != llvm::AtomicOrdering::Release)
    AcquireBB = CGF.createBasicBlock("acquire_fail", CGF.CurFn);
  if (SuccessOrder == llvm::AtomicOrdering::SequentiallyConsistent)
    SeqCstBB = CGF.createBasicBlock("seqcst_fail", CGF.CurFn);

  llvm::BasicBlock *ContBB = CGF.createBasicBlock("atomic.continue", CGF.CurFn);

  llvm::SwitchInst *SI = CGF.Builder.CreateSwitch(FailureOrderVal, MonotonicBB);

  // Emit all the different atomics

  // MonotonicBB is arbitrarily chosen as the default case; in practice, this
  // doesn't matter unless someone is crazy enough to use something that
  // doesn't fold to a constant for the ordering.
  CGF.Builder.SetInsertPoint(MonotonicBB);
  emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2,
                    Size, SuccessOrder, llvm::AtomicOrdering::Monotonic);
  CGF.Builder.CreateBr(ContBB);

  if (AcquireBB) {
    CGF.Builder.SetInsertPoint(AcquireBB);
    emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2,
                      Size, SuccessOrder, llvm::AtomicOrdering::Acquire);
    CGF.Builder.CreateBr(ContBB);
    SI->addCase(CGF.Builder.getInt32(AtomicExpr::AO_ABI_memory_order_consume),
                AcquireBB);
    SI->addCase(CGF.Builder.getInt32(AtomicExpr::AO_ABI_memory_order_acquire),
                AcquireBB);
  }
  if (SeqCstBB) {
    CGF.Builder.SetInsertPoint(SeqCstBB);
    emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, Size, SuccessOrder,
                      llvm::AtomicOrdering::SequentiallyConsistent);
    CGF.Builder.CreateBr(ContBB);
    SI->addCase(CGF.Builder.getInt32(AtomicExpr::AO_ABI_memory_order_seq_cst),
                SeqCstBB);
  }

  CGF.Builder.SetInsertPoint(ContBB);
}

static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, Address Dest,
                         Address Ptr, Address Val1, Address Val2,
                         llvm::Value *IsWeak, llvm::Value *FailureOrder,
                         uint64_t Size, llvm::AtomicOrdering Order) {
  llvm::AtomicRMWInst::BinOp Op = llvm::AtomicRMWInst::Add;
  llvm::Instruction::BinaryOps PostOp = (llvm::Instruction::BinaryOps)0;

  switch (E->getOp()) {
  case AtomicExpr::AO__c11_atomic_init:
    llvm_unreachable("Already handled!");

  case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
    emitAtomicCmpXchgFailureSet(CGF, E, false, Dest, Ptr, Val1, Val2,
                                FailureOrder, Size, Order);
    return;
  case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
    emitAtomicCmpXchgFailureSet(CGF, E, true, Dest, Ptr, Val1, Val2,
                                FailureOrder, Size, Order);
    return;
  case AtomicExpr::AO__atomic_compare_exchange:
  case AtomicExpr::AO__atomic_compare_exchange_n: {
    if (llvm::ConstantInt *IsWeakC = dyn_cast<llvm::ConstantInt>(IsWeak)) {
      emitAtomicCmpXchgFailureSet(CGF, E, IsWeakC->getZExtValue(), Dest, Ptr,
                                  Val1, Val2, FailureOrder, Size, Order);
    } else {
      // Create all the relevant BB's
      llvm::BasicBlock *StrongBB =
          CGF.createBasicBlock("cmpxchg.strong", CGF.CurFn);
      llvm::BasicBlock *WeakBB =
          CGF.createBasicBlock("cmpxchg.weak", CGF.CurFn);
      llvm::BasicBlock *ContBB =
          CGF.createBasicBlock("cmpxchg.continue", CGF.CurFn);

      llvm::SwitchInst *SI = CGF.Builder.CreateSwitch(IsWeak, WeakBB);
      SI->addCase(CGF.Builder.getInt1(false), StrongBB);

      CGF.Builder.SetInsertPoint(StrongBB);
      emitAtomicCmpXchgFailureSet(CGF, E, false, Dest, Ptr, Val1, Val2,
                                  FailureOrder, Size, Order);
      CGF.Builder.CreateBr(ContBB);

      CGF.Builder.SetInsertPoint(WeakBB);
      emitAtomicCmpXchgFailureSet(CGF, E, true, Dest, Ptr, Val1, Val2,
                                  FailureOrder, Size, Order);
      CGF.Builder.CreateBr(ContBB);

      CGF.Builder.SetInsertPoint(ContBB);
    }
    return;
  }
  case AtomicExpr::AO__c11_atomic_load:
  case AtomicExpr::AO__atomic_load_n:
  case AtomicExpr::AO__atomic_load: {
    llvm::LoadInst *Load = CGF.Builder.CreateLoad(Ptr);
    Load->setAtomic(Order);
    Load->setVolatile(E->isVolatile());
    CGF.Builder.CreateStore(Load, Dest);
    return;
  }

  case AtomicExpr::AO__c11_atomic_store:
  case AtomicExpr::AO__atomic_store:
  case AtomicExpr::AO__atomic_store_n: {
    llvm::Value *LoadVal1 = CGF.Builder.CreateLoad(Val1);
    llvm::StoreInst *Store = CGF.Builder.CreateStore(LoadVal1, Ptr);
    Store->setAtomic(Order);
    Store->setVolatile(E->isVolatile());
    return;
  }

  case AtomicExpr::AO__c11_atomic_exchange:
  case AtomicExpr::AO__atomic_exchange_n:
  case AtomicExpr::AO__atomic_exchange:
    Op = llvm::AtomicRMWInst::Xchg;
    break;

  case AtomicExpr::AO__atomic_add_fetch:
    PostOp = llvm::Instruction::Add;
    // Fall through.
  case AtomicExpr::AO__c11_atomic_fetch_add:
  case AtomicExpr::AO__atomic_fetch_add:
    Op = llvm::AtomicRMWInst::Add;
    break;

  case AtomicExpr::AO__atomic_sub_fetch:
    PostOp = llvm::Instruction::Sub;
    // Fall through.
  case AtomicExpr::AO__c11_atomic_fetch_sub:
  case AtomicExpr::AO__atomic_fetch_sub:
    Op = llvm::AtomicRMWInst::Sub;
    break;

  case AtomicExpr::AO__atomic_and_fetch:
    PostOp = llvm::Instruction::And;
    // Fall through.
  case AtomicExpr::AO__c11_atomic_fetch_and:
  case AtomicExpr::AO__atomic_fetch_and:
    Op = llvm::AtomicRMWInst::And;
    break;

  case AtomicExpr::AO__atomic_or_fetch:
    PostOp = llvm::Instruction::Or;
    // Fall through.
  case AtomicExpr::AO__c11_atomic_fetch_or:
  case AtomicExpr::AO__atomic_fetch_or:
    Op = llvm::AtomicRMWInst::Or;
    break;

  case AtomicExpr::AO__atomic_xor_fetch:
    PostOp = llvm::Instruction::Xor;
    // Fall through.
  case AtomicExpr::AO__c11_atomic_fetch_xor:
  case AtomicExpr::AO__atomic_fetch_xor:
    Op = llvm::AtomicRMWInst::Xor;
    break;

  case AtomicExpr::AO__atomic_nand_fetch:
    PostOp = llvm::Instruction::And; // the NOT is special cased below
    // Fall through.
  case AtomicExpr::AO__atomic_fetch_nand:
    Op = llvm::AtomicRMWInst::Nand;
    break;
  }

  llvm::Value *LoadVal1 = CGF.Builder.CreateLoad(Val1);
  llvm::AtomicRMWInst *RMWI =
      CGF.Builder.CreateAtomicRMW(Op, Ptr.getPointer(), LoadVal1, Order);
  RMWI->setVolatile(E->isVolatile());

  // For __atomic_*_fetch operations, perform the operation again to
  // determine the value which was written.
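  // For __atomic_nand_fetch this recomputes ~(old & val) from the atomicrmw
  // result below, matching the GNU semantics of nand.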
  llvm::Value *Result = RMWI;
  if (PostOp)
    Result = CGF.Builder.CreateBinOp(PostOp, RMWI, LoadVal1);
  if (E->getOp() == AtomicExpr::AO__atomic_nand_fetch)
    Result = CGF.Builder.CreateNot(Result);
  CGF.Builder.CreateStore(Result, Dest);
}

// This function emits any expression (scalar, complex, or aggregate)
// into a temporary alloca.
static Address
EmitValToTemp(CodeGenFunction &CGF, Expr *E) {
  Address DeclPtr = CGF.CreateMemTemp(E->getType(), ".atomictmp");
  CGF.EmitAnyExprToMem(E, DeclPtr, E->getType().getQualifiers(),
                       /*Init*/ true);
  return DeclPtr;
}

static void
AddDirectArgument(CodeGenFunction &CGF, CallArgList &Args,
                  bool UseOptimizedLibcall, llvm::Value *Val, QualType ValTy,
                  SourceLocation Loc, CharUnits SizeInChars) {
  if (UseOptimizedLibcall) {
    // Load value and pass it to the function directly.
    CharUnits Align = CGF.getContext().getTypeAlignInChars(ValTy);
    int64_t SizeInBits = CGF.getContext().toBits(SizeInChars);
    ValTy =
        CGF.getContext().getIntTypeForBitwidth(SizeInBits, /*Signed=*/false);
    llvm::Type *IPtrTy = llvm::IntegerType::get(CGF.getLLVMContext(),
                                                SizeInBits)->getPointerTo();
    Address Ptr = Address(CGF.Builder.CreateBitCast(Val, IPtrTy), Align);
    Val = CGF.EmitLoadOfScalar(Ptr, false,
                               CGF.getContext().getPointerType(ValTy),
                               Loc);
    // Coerce the value into an appropriately sized integer type.
    Args.add(RValue::get(Val), ValTy);
  } else {
    // Non-optimized functions always take a reference.
    Args.add(RValue::get(CGF.EmitCastToVoidPtr(Val)),
             CGF.getContext().VoidPtrTy);
  }
}

RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
  QualType AtomicTy = E->getPtr()->getType()->getPointeeType();
  QualType MemTy = AtomicTy;
  if (const AtomicType *AT = AtomicTy->getAs<AtomicType>())
    MemTy = AT->getValueType();
  CharUnits sizeChars, alignChars;
  std::tie(sizeChars, alignChars) = getContext().getTypeInfoInChars(AtomicTy);
  uint64_t Size = sizeChars.getQuantity();
  unsigned MaxInlineWidthInBits = getTarget().getMaxAtomicInlineWidth();
  bool UseLibcall = (sizeChars != alignChars ||
                     getContext().toBits(sizeChars) > MaxInlineWidthInBits);
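  // Illustrative example: a 16-byte _Atomic struct on a target whose maximum
  // inline atomic width is 64 bits fails this check and takes the libcall
  // path below.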

  llvm::Value *IsWeak = nullptr, *OrderFail = nullptr;

  Address Val1 = Address::invalid();
  Address Val2 = Address::invalid();
  Address Dest = Address::invalid();
  Address Ptr(EmitScalarExpr(E->getPtr()), alignChars);

  if (E->getOp() == AtomicExpr::AO__c11_atomic_init) {
    LValue lvalue = MakeAddrLValue(Ptr, AtomicTy);
    EmitAtomicInit(E->getVal1(), lvalue);
    return RValue::get(nullptr);
  }

  llvm::Value *Order = EmitScalarExpr(E->getOrder());

  switch (E->getOp()) {
  case AtomicExpr::AO__c11_atomic_init:
    llvm_unreachable("Already handled above with EmitAtomicInit!");

  case AtomicExpr::AO__c11_atomic_load:
  case AtomicExpr::AO__atomic_load_n:
    break;

  case AtomicExpr::AO__atomic_load:
    Dest = EmitPointerWithAlignment(E->getVal1());
    break;

  case AtomicExpr::AO__atomic_store:
    Val1 = EmitPointerWithAlignment(E->getVal1());
    break;

  case AtomicExpr::AO__atomic_exchange:
    Val1 = EmitPointerWithAlignment(E->getVal1());
    Dest = EmitPointerWithAlignment(E->getVal2());
    break;

  case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
  case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
  case AtomicExpr::AO__atomic_compare_exchange_n:
  case AtomicExpr::AO__atomic_compare_exchange:
    Val1 = EmitPointerWithAlignment(E->getVal1());
    if (E->getOp() == AtomicExpr::AO__atomic_compare_exchange)
      Val2 = EmitPointerWithAlignment(E->getVal2());
    else
      Val2 = EmitValToTemp(*this, E->getVal2());
    OrderFail = EmitScalarExpr(E->getOrderFail());
    if (E->getNumSubExprs() == 6)
      IsWeak = EmitScalarExpr(E->getWeak());
    break;

  case AtomicExpr::AO__c11_atomic_fetch_add:
  case AtomicExpr::AO__c11_atomic_fetch_sub:
    if (MemTy->isPointerType()) {
      // For pointer arithmetic, we're required to do a bit of math:
      // adding 1 to an int* is not the same as adding 1 to a uintptr_t.
      // ... but only for the C11 builtins. The GNU builtins expect the
      // user to multiply by sizeof(T).
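      // Illustrative example: with a 4-byte pointee,
      // __c11_atomic_fetch_add(ptr, 1, order) must add 4 to the pointer
      // representation, so the operand is scaled by the pointee size here.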
748 QualType Val1Ty = E->getVal1()->getType();
749 llvm::Value *Val1Scalar = EmitScalarExpr(E->getVal1());
750 CharUnits PointeeIncAmt =
751 getContext().getTypeSizeInChars(MemTy->getPointeeType());
752 Val1Scalar = Builder.CreateMul(Val1Scalar, CGM.getSize(PointeeIncAmt));
John McCall7f416cc2015-09-08 08:05:57 +0000753 auto Temp = CreateMemTemp(Val1Ty, ".atomictmp");
754 Val1 = Temp;
755 EmitStoreOfScalar(Val1Scalar, MakeAddrLValue(Temp, Val1Ty));
John McCallfc207f22013-03-07 21:37:12 +0000756 break;
757 }
758 // Fall through.
759 case AtomicExpr::AO__atomic_fetch_add:
760 case AtomicExpr::AO__atomic_fetch_sub:
761 case AtomicExpr::AO__atomic_add_fetch:
762 case AtomicExpr::AO__atomic_sub_fetch:
763 case AtomicExpr::AO__c11_atomic_store:
764 case AtomicExpr::AO__c11_atomic_exchange:
765 case AtomicExpr::AO__atomic_store_n:
766 case AtomicExpr::AO__atomic_exchange_n:
767 case AtomicExpr::AO__c11_atomic_fetch_and:
768 case AtomicExpr::AO__c11_atomic_fetch_or:
769 case AtomicExpr::AO__c11_atomic_fetch_xor:
770 case AtomicExpr::AO__atomic_fetch_and:
771 case AtomicExpr::AO__atomic_fetch_or:
772 case AtomicExpr::AO__atomic_fetch_xor:
773 case AtomicExpr::AO__atomic_fetch_nand:
774 case AtomicExpr::AO__atomic_and_fetch:
775 case AtomicExpr::AO__atomic_or_fetch:
776 case AtomicExpr::AO__atomic_xor_fetch:
777 case AtomicExpr::AO__atomic_nand_fetch:
778 Val1 = EmitValToTemp(*this, E->getVal1());
779 break;
780 }
781
David Majnemeree8d04d2014-12-12 08:16:09 +0000782 QualType RValTy = E->getType().getUnqualifiedType();
783
Tim Northovercc2a6e02015-11-09 19:56:35 +0000784 // The inlined atomics only function on iN types, where N is a power of 2. We
785 // need to make sure (via temporaries if necessary) that all incoming values
786 // are compatible.
787 LValue AtomicVal = MakeAddrLValue(Ptr, AtomicTy);
788 AtomicInfo Atomics(*this, AtomicVal);
789
790 Ptr = Atomics.emitCastToAtomicIntPointer(Ptr);
791 if (Val1.isValid()) Val1 = Atomics.convertToAtomicIntPointer(Val1);
792 if (Val2.isValid()) Val2 = Atomics.convertToAtomicIntPointer(Val2);
793 if (Dest.isValid())
794 Dest = Atomics.emitCastToAtomicIntPointer(Dest);
795 else if (E->isCmpXChg())
796 Dest = CreateMemTemp(RValTy, "cmpxchg.bool");
797 else if (!RValTy->isVoidType())
798 Dest = Atomics.emitCastToAtomicIntPointer(Atomics.CreateTempAlloca());
John McCallfc207f22013-03-07 21:37:12 +0000799
800 // Use a library call. See: http://gcc.gnu.org/wiki/Atomic/GCCMM/LIbrary .
801 if (UseLibcall) {
Ed Schoutenc7e82bd2013-05-31 19:27:59 +0000802 bool UseOptimizedLibcall = false;
803 switch (E->getOp()) {
James Y Knight81167fb2015-08-05 16:57:36 +0000804 case AtomicExpr::AO__c11_atomic_init:
805 llvm_unreachable("Already handled above with EmitAtomicInit!");
806
Ed Schoutenc7e82bd2013-05-31 19:27:59 +0000807 case AtomicExpr::AO__c11_atomic_fetch_add:
808 case AtomicExpr::AO__atomic_fetch_add:
809 case AtomicExpr::AO__c11_atomic_fetch_and:
810 case AtomicExpr::AO__atomic_fetch_and:
811 case AtomicExpr::AO__c11_atomic_fetch_or:
812 case AtomicExpr::AO__atomic_fetch_or:
James Y Knight81167fb2015-08-05 16:57:36 +0000813 case AtomicExpr::AO__atomic_fetch_nand:
Ed Schoutenc7e82bd2013-05-31 19:27:59 +0000814 case AtomicExpr::AO__c11_atomic_fetch_sub:
815 case AtomicExpr::AO__atomic_fetch_sub:
816 case AtomicExpr::AO__c11_atomic_fetch_xor:
817 case AtomicExpr::AO__atomic_fetch_xor:
James Y Knight81167fb2015-08-05 16:57:36 +0000818 case AtomicExpr::AO__atomic_add_fetch:
819 case AtomicExpr::AO__atomic_and_fetch:
820 case AtomicExpr::AO__atomic_nand_fetch:
821 case AtomicExpr::AO__atomic_or_fetch:
822 case AtomicExpr::AO__atomic_sub_fetch:
823 case AtomicExpr::AO__atomic_xor_fetch:
Ed Schoutenc7e82bd2013-05-31 19:27:59 +0000824 // For these, only library calls for certain sizes exist.
825 UseOptimizedLibcall = true;
826 break;
James Y Knight81167fb2015-08-05 16:57:36 +0000827
828 case AtomicExpr::AO__c11_atomic_load:
829 case AtomicExpr::AO__c11_atomic_store:
830 case AtomicExpr::AO__c11_atomic_exchange:
831 case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
832 case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
833 case AtomicExpr::AO__atomic_load_n:
834 case AtomicExpr::AO__atomic_load:
835 case AtomicExpr::AO__atomic_store_n:
836 case AtomicExpr::AO__atomic_store:
837 case AtomicExpr::AO__atomic_exchange_n:
838 case AtomicExpr::AO__atomic_exchange:
839 case AtomicExpr::AO__atomic_compare_exchange_n:
840 case AtomicExpr::AO__atomic_compare_exchange:
Ed Schoutenc7e82bd2013-05-31 19:27:59 +0000841 // Only use optimized library calls for sizes for which they exist.
842 if (Size == 1 || Size == 2 || Size == 4 || Size == 8)
843 UseOptimizedLibcall = true;
844 break;
845 }
John McCallfc207f22013-03-07 21:37:12 +0000846
John McCallfc207f22013-03-07 21:37:12 +0000847 CallArgList Args;
Ed Schoutenc7e82bd2013-05-31 19:27:59 +0000848 if (!UseOptimizedLibcall) {
849 // For non-optimized library calls, the size is the first parameter
850 Args.add(RValue::get(llvm::ConstantInt::get(SizeTy, Size)),
851 getContext().getSizeType());
852 }
853 // Atomic address is the first or second parameter
John McCall7f416cc2015-09-08 08:05:57 +0000854 Args.add(RValue::get(EmitCastToVoidPtr(Ptr.getPointer())),
855 getContext().VoidPtrTy);
John McCallfc207f22013-03-07 21:37:12 +0000856
Ed Schoutenc7e82bd2013-05-31 19:27:59 +0000857 std::string LibCallName;
Logan Chien74798a32014-03-26 17:35:01 +0000858 QualType LoweredMemTy =
859 MemTy->isPointerType() ? getContext().getIntPtrType() : MemTy;
Ed Schoutenc7e82bd2013-05-31 19:27:59 +0000860 QualType RetTy;
861 bool HaveRetTy = false;
James Y Knight7aefb5b2015-11-12 18:37:29 +0000862 llvm::Instruction::BinaryOps PostOp = (llvm::Instruction::BinaryOps)0;
John McCallfc207f22013-03-07 21:37:12 +0000863 switch (E->getOp()) {
James Y Knight81167fb2015-08-05 16:57:36 +0000864 case AtomicExpr::AO__c11_atomic_init:
865 llvm_unreachable("Already handled!");
866
    // There is only one libcall for compare and exchange, because there is no
    // optimisation benefit possible from a libcall version of a weak compare
    // and exchange.
    // bool __atomic_compare_exchange(size_t size, void *mem, void *expected,
    //                                void *desired, int success, int failure)
    // bool __atomic_compare_exchange_N(T *mem, T *expected, T desired,
    //                                  int success, int failure)
    case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
    case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
    case AtomicExpr::AO__atomic_compare_exchange:
    case AtomicExpr::AO__atomic_compare_exchange_n:
      LibCallName = "__atomic_compare_exchange";
      RetTy = getContext().BoolTy;
      HaveRetTy = true;
      Args.add(RValue::get(EmitCastToVoidPtr(Val1.getPointer())),
               getContext().VoidPtrTy);
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val2.getPointer(),
                        MemTy, E->getExprLoc(), sizeChars);
      Args.add(RValue::get(Order), getContext().IntTy);
      Order = OrderFail;
      break;
    // void __atomic_exchange(size_t size, void *mem, void *val, void *return,
    //                        int order)
    // T __atomic_exchange_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_exchange:
    case AtomicExpr::AO__atomic_exchange_n:
    case AtomicExpr::AO__atomic_exchange:
      LibCallName = "__atomic_exchange";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
                        MemTy, E->getExprLoc(), sizeChars);
      break;
    // void __atomic_store(size_t size, void *mem, void *val, int order)
    // void __atomic_store_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_store:
    case AtomicExpr::AO__atomic_store:
    case AtomicExpr::AO__atomic_store_n:
      LibCallName = "__atomic_store";
      RetTy = getContext().VoidTy;
      HaveRetTy = true;
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
                        MemTy, E->getExprLoc(), sizeChars);
      break;
    // void __atomic_load(size_t size, void *mem, void *return, int order)
    // T __atomic_load_N(T *mem, int order)
    case AtomicExpr::AO__c11_atomic_load:
    case AtomicExpr::AO__atomic_load:
    case AtomicExpr::AO__atomic_load_n:
      LibCallName = "__atomic_load";
      break;
    // T __atomic_add_fetch_N(T *mem, T val, int order)
    // T __atomic_fetch_add_N(T *mem, T val, int order)
    case AtomicExpr::AO__atomic_add_fetch:
      PostOp = llvm::Instruction::Add;
      // Fall through.
    case AtomicExpr::AO__c11_atomic_fetch_add:
    case AtomicExpr::AO__atomic_fetch_add:
      LibCallName = "__atomic_fetch_add";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
                        LoweredMemTy, E->getExprLoc(), sizeChars);
      break;
    // T __atomic_and_fetch_N(T *mem, T val, int order)
    // T __atomic_fetch_and_N(T *mem, T val, int order)
    case AtomicExpr::AO__atomic_and_fetch:
      PostOp = llvm::Instruction::And;
      // Fall through.
    case AtomicExpr::AO__c11_atomic_fetch_and:
    case AtomicExpr::AO__atomic_fetch_and:
      LibCallName = "__atomic_fetch_and";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
                        MemTy, E->getExprLoc(), sizeChars);
      break;
    // T __atomic_or_fetch_N(T *mem, T val, int order)
    // T __atomic_fetch_or_N(T *mem, T val, int order)
    case AtomicExpr::AO__atomic_or_fetch:
      PostOp = llvm::Instruction::Or;
      // Fall through.
    case AtomicExpr::AO__c11_atomic_fetch_or:
    case AtomicExpr::AO__atomic_fetch_or:
      LibCallName = "__atomic_fetch_or";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
                        MemTy, E->getExprLoc(), sizeChars);
      break;
    // T __atomic_sub_fetch_N(T *mem, T val, int order)
    // T __atomic_fetch_sub_N(T *mem, T val, int order)
    case AtomicExpr::AO__atomic_sub_fetch:
      PostOp = llvm::Instruction::Sub;
      // Fall through.
    case AtomicExpr::AO__c11_atomic_fetch_sub:
    case AtomicExpr::AO__atomic_fetch_sub:
      LibCallName = "__atomic_fetch_sub";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
                        LoweredMemTy, E->getExprLoc(), sizeChars);
      break;
    // T __atomic_xor_fetch_N(T *mem, T val, int order)
    // T __atomic_fetch_xor_N(T *mem, T val, int order)
    case AtomicExpr::AO__atomic_xor_fetch:
      PostOp = llvm::Instruction::Xor;
      // Fall through.
    case AtomicExpr::AO__c11_atomic_fetch_xor:
    case AtomicExpr::AO__atomic_fetch_xor:
      LibCallName = "__atomic_fetch_xor";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
                        MemTy, E->getExprLoc(), sizeChars);
      break;
    // T __atomic_nand_fetch_N(T *mem, T val, int order)
    // T __atomic_fetch_nand_N(T *mem, T val, int order)
    case AtomicExpr::AO__atomic_nand_fetch:
      PostOp = llvm::Instruction::And; // the NOT is special cased below
      // Fall through.
    case AtomicExpr::AO__atomic_fetch_nand:
      LibCallName = "__atomic_fetch_nand";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
                        MemTy, E->getExprLoc(), sizeChars);
      break;
    }

    // Optimized functions have the size in their name.
    if (UseOptimizedLibcall)
      LibCallName += "_" + llvm::utostr(Size);
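    // E.g. "__atomic_fetch_add_4" for a 4-byte operand.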
986 // By default, assume we return a value of the atomic type.
987 if (!HaveRetTy) {
988 if (UseOptimizedLibcall) {
989 // Value is returned directly.
David Majnemer0392cf82014-08-29 07:27:49 +0000990 // The function returns an appropriately sized integer type.
991 RetTy = getContext().getIntTypeForBitwidth(
992 getContext().toBits(sizeChars), /*Signed=*/false);
Ed Schoutenc7e82bd2013-05-31 19:27:59 +0000993 } else {
994 // Value is returned through parameter before the order.
995 RetTy = getContext().VoidTy;
John McCall7f416cc2015-09-08 08:05:57 +0000996 Args.add(RValue::get(EmitCastToVoidPtr(Dest.getPointer())),
997 getContext().VoidPtrTy);
Ed Schoutenc7e82bd2013-05-31 19:27:59 +0000998 }
999 }
John McCallfc207f22013-03-07 21:37:12 +00001000 // order is always the last parameter
1001 Args.add(RValue::get(Order),
1002 getContext().IntTy);
1003
James Y Knight7aefb5b2015-11-12 18:37:29 +00001004 // PostOp is only needed for the atomic_*_fetch operations, and
1005 // thus is only needed for and implemented in the
1006 // UseOptimizedLibcall codepath.
1007 assert(UseOptimizedLibcall || !PostOp);
1008
David Majnemer659be552014-11-25 23:44:32 +00001009 RValue Res = emitAtomicLibcall(*this, LibCallName, RetTy, Args);
1010 // The value is returned directly from the libcall.
Tim Northovercc2a6e02015-11-09 19:56:35 +00001011 if (E->isCmpXChg())
David Majnemer659be552014-11-25 23:44:32 +00001012 return Res;
Tim Northovercc2a6e02015-11-09 19:56:35 +00001013
1014 // The value is returned directly for optimized libcalls but the expr
1015 // provided an out-param.
1016 if (UseOptimizedLibcall && Res.getScalarVal()) {
David Majnemer659be552014-11-25 23:44:32 +00001017 llvm::Value *ResVal = Res.getScalarVal();
James Y Knight7aefb5b2015-11-12 18:37:29 +00001018 if (PostOp) {
1019 llvm::Value *LoadVal1 = Args[1].RV.getScalarVal();
1020 ResVal = Builder.CreateBinOp(PostOp, ResVal, LoadVal1);
1021 }
1022 if (E->getOp() == AtomicExpr::AO__atomic_nand_fetch)
1023 ResVal = Builder.CreateNot(ResVal);
1024
Tim Northovercc2a6e02015-11-09 19:56:35 +00001025 Builder.CreateStore(
1026 ResVal,
1027 Builder.CreateBitCast(Dest, ResVal->getType()->getPointerTo()));
David Majnemer659be552014-11-25 23:44:32 +00001028 }
Tim Northovercc2a6e02015-11-09 19:56:35 +00001029
1030 if (RValTy->isVoidType())
1031 return RValue::get(nullptr);
1032
1033 return convertTempToRValue(
1034 Builder.CreateBitCast(Dest, ConvertTypeForMem(RValTy)->getPointerTo()),
1035 RValTy, E->getExprLoc());
John McCallfc207f22013-03-07 21:37:12 +00001036 }
1037
1038 bool IsStore = E->getOp() == AtomicExpr::AO__c11_atomic_store ||
1039 E->getOp() == AtomicExpr::AO__atomic_store ||
1040 E->getOp() == AtomicExpr::AO__atomic_store_n;
1041 bool IsLoad = E->getOp() == AtomicExpr::AO__c11_atomic_load ||
1042 E->getOp() == AtomicExpr::AO__atomic_load ||
1043 E->getOp() == AtomicExpr::AO__atomic_load_n;
1044
  if (isa<llvm::ConstantInt>(Order)) {
    int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
    switch (ord) {
    case AtomicExpr::AO_ABI_memory_order_relaxed:
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
                   Size, llvm::AtomicOrdering::Monotonic);
      break;
    case AtomicExpr::AO_ABI_memory_order_consume:
    case AtomicExpr::AO_ABI_memory_order_acquire:
      if (IsStore)
        break; // Avoid crashing on code with undefined behavior
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
                   Size, llvm::AtomicOrdering::Acquire);
      break;
    case AtomicExpr::AO_ABI_memory_order_release:
      if (IsLoad)
        break; // Avoid crashing on code with undefined behavior
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
                   Size, llvm::AtomicOrdering::Release);
      break;
    case AtomicExpr::AO_ABI_memory_order_acq_rel:
      if (IsLoad || IsStore)
        break; // Avoid crashing on code with undefined behavior
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
                   Size, llvm::AtomicOrdering::AcquireRelease);
      break;
    case AtomicExpr::AO_ABI_memory_order_seq_cst:
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
                   Size, llvm::AtomicOrdering::SequentiallyConsistent);
      break;
    default: // invalid order
      // We should not ever get here normally, but it's hard to
      // enforce that in general.
      break;
    }
    if (RValTy->isVoidType())
      return RValue::get(nullptr);

    return convertTempToRValue(
        Builder.CreateBitCast(Dest, ConvertTypeForMem(RValTy)->getPointerTo()),
        RValTy, E->getExprLoc());
  }

  // Long case, when Order isn't obviously constant.

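  // For example (hypothetical caller), an ordering that only becomes known at
  // run time, as in
  //   void f(_Atomic(int) *p, int v, int order) {
  //     __c11_atomic_store(p, v, order);
  //   }
  // cannot be resolved statically, so we branch on 'order' below.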
  // Create all the relevant BB's
  llvm::BasicBlock *MonotonicBB = nullptr, *AcquireBB = nullptr,
                   *ReleaseBB = nullptr, *AcqRelBB = nullptr,
                   *SeqCstBB = nullptr;
  MonotonicBB = createBasicBlock("monotonic", CurFn);
  if (!IsStore)
    AcquireBB = createBasicBlock("acquire", CurFn);
  if (!IsLoad)
    ReleaseBB = createBasicBlock("release", CurFn);
  if (!IsLoad && !IsStore)
    AcqRelBB = createBasicBlock("acqrel", CurFn);
  SeqCstBB = createBasicBlock("seqcst", CurFn);
  llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);

  // Create the switch for the split
  // MonotonicBB is arbitrarily chosen as the default case; in practice, this
  // doesn't matter unless someone is crazy enough to use something that
  // doesn't fold to a constant for the ordering.
  Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
  llvm::SwitchInst *SI = Builder.CreateSwitch(Order, MonotonicBB);

  // Emit all the different atomics
  Builder.SetInsertPoint(MonotonicBB);
  EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
               Size, llvm::AtomicOrdering::Monotonic);
  Builder.CreateBr(ContBB);
  if (!IsStore) {
    Builder.SetInsertPoint(AcquireBB);
    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
                 Size, llvm::AtomicOrdering::Acquire);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32(AtomicExpr::AO_ABI_memory_order_consume),
                AcquireBB);
    SI->addCase(Builder.getInt32(AtomicExpr::AO_ABI_memory_order_acquire),
                AcquireBB);
  }
  if (!IsLoad) {
    Builder.SetInsertPoint(ReleaseBB);
    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
                 Size, llvm::AtomicOrdering::Release);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32(AtomicExpr::AO_ABI_memory_order_release),
                ReleaseBB);
  }
  if (!IsLoad && !IsStore) {
    Builder.SetInsertPoint(AcqRelBB);
    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
                 Size, llvm::AtomicOrdering::AcquireRelease);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32(AtomicExpr::AO_ABI_memory_order_acq_rel),
                AcqRelBB);
  }
  Builder.SetInsertPoint(SeqCstBB);
  EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
               Size, llvm::AtomicOrdering::SequentiallyConsistent);
  Builder.CreateBr(ContBB);
  SI->addCase(Builder.getInt32(AtomicExpr::AO_ABI_memory_order_seq_cst),
              SeqCstBB);

  // Cleanup and return
  Builder.SetInsertPoint(ContBB);
  if (RValTy->isVoidType())
    return RValue::get(nullptr);

  assert(Atomics.getValueSizeInBits() <= Atomics.getAtomicSizeInBits());
  return convertTempToRValue(
      Builder.CreateBitCast(Dest, ConvertTypeForMem(RValTy)->getPointerTo()),
      RValTy, E->getExprLoc());
}

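// A minimal sketch of what the cast below produces: an _Atomic(float) whose
// storage is 4 bytes wide is accessed through an i32* in the same address
// space, e.g. (illustrative IR)
//   %1 = bitcast float* %0 to i32*
// so that loads and stores can be expressed as integer atomics.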
Address AtomicInfo::emitCastToAtomicIntPointer(Address addr) const {
  unsigned addrspace =
    cast<llvm::PointerType>(addr.getPointer()->getType())->getAddressSpace();
  llvm::IntegerType *ty =
    llvm::IntegerType::get(CGF.getLLVMContext(), AtomicSizeInBits);
  return CGF.Builder.CreateBitCast(addr, ty->getPointerTo(addrspace));
}

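// Sketch of the mismatched-size path below (sizes are illustrative
// assumptions): loading a 3-byte packed value through a 4-byte atomic first
// memcpy's the bytes into a correctly sized temporary, then reuses the
// integer-pointer cast above.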
Address AtomicInfo::convertToAtomicIntPointer(Address Addr) const {
  llvm::Type *Ty = Addr.getElementType();
  uint64_t SourceSizeInBits = CGF.CGM.getDataLayout().getTypeSizeInBits(Ty);
  if (SourceSizeInBits != AtomicSizeInBits) {
    Address Tmp = CreateTempAlloca();
    CGF.Builder.CreateMemCpy(Tmp, Addr,
                             std::min(AtomicSizeInBits, SourceSizeInBits) / 8);
    Addr = Tmp;
  }

  return emitCastToAtomicIntPointer(Addr);
}

RValue AtomicInfo::convertAtomicTempToRValue(Address addr,
                                             AggValueSlot resultSlot,
                                             SourceLocation loc,
                                             bool asValue) const {
  if (LVal.isSimple()) {
    if (EvaluationKind == TEK_Aggregate)
      return resultSlot.asRValue();

    // Drill into the padding structure if we have one.
    if (hasPadding())
      addr = CGF.Builder.CreateStructGEP(addr, 0, CharUnits());

    // Otherwise, just convert the temporary to an r-value using the
    // normal conversion routine.
    return CGF.convertTempToRValue(addr, getValueType(), loc);
  }
  if (!asValue)
    // Get RValue from temp memory as atomic for non-simple lvalues
    return RValue::get(CGF.Builder.CreateLoad(addr));
  if (LVal.isBitField())
    return CGF.EmitLoadOfBitfieldLValue(
        LValue::MakeBitfield(addr, LVal.getBitFieldInfo(), LVal.getType(),
                             LVal.getAlignmentSource()));
  if (LVal.isVectorElt())
    return CGF.EmitLoadOfLValue(
        LValue::MakeVectorElt(addr, LVal.getVectorIdx(), LVal.getType(),
                              LVal.getAlignmentSource()), loc);
  assert(LVal.isExtVectorElt());
  return CGF.EmitLoadOfExtVectorElementLValue(LValue::MakeExtVectorElt(
      addr, LVal.getExtVectorElts(), LVal.getType(),
      LVal.getAlignmentSource()));
}

RValue AtomicInfo::ConvertIntToValueOrAtomic(llvm::Value *IntVal,
                                             AggValueSlot ResultSlot,
                                             SourceLocation Loc,
                                             bool AsValue) const {
  // Try not to go through memory in the easy cases.
  assert(IntVal->getType()->isIntegerTy() && "Expected integer value");
  if (getEvaluationKind() == TEK_Scalar &&
      (((!LVal.isBitField() ||
         LVal.getBitFieldInfo().Size == ValueSizeInBits) &&
        !hasPadding()) ||
       !AsValue)) {
    auto *ValTy = AsValue
                      ? CGF.ConvertTypeForMem(ValueTy)
                      : getAtomicAddress().getType()->getPointerElementType();
    if (ValTy->isIntegerTy()) {
      assert(IntVal->getType() == ValTy && "Different integer types.");
      return RValue::get(CGF.EmitFromMemory(IntVal, ValueTy));
    } else if (ValTy->isPointerTy())
      return RValue::get(CGF.Builder.CreateIntToPtr(IntVal, ValTy));
    else if (llvm::CastInst::isBitCastable(IntVal->getType(), ValTy))
      return RValue::get(CGF.Builder.CreateBitCast(IntVal, ValTy));
  }

  // Create a temporary. This needs to be big enough to hold the
  // atomic integer.
  Address Temp = Address::invalid();
  bool TempIsVolatile = false;
  if (AsValue && getEvaluationKind() == TEK_Aggregate) {
    assert(!ResultSlot.isIgnored());
    Temp = ResultSlot.getAddress();
    TempIsVolatile = ResultSlot.isVolatile();
  } else {
    Temp = CreateTempAlloca();
  }

  // Slam the integer into the temporary.
  Address CastTemp = emitCastToAtomicIntPointer(Temp);
  CGF.Builder.CreateStore(IntVal, CastTemp)
      ->setVolatile(TempIsVolatile);

  return convertAtomicTempToRValue(Temp, ResultSlot, Loc, AsValue);
}

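// Illustrative call emitted by the libcall below, assuming a 16-byte atomic
// object and sequentially consistent ordering (IR sketch only):
//   call void @__atomic_load(i64 16, i8* %obj, i8* %tmp, i32 5)
// where 5 is the C11 value of memory_order_seq_cst.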
void AtomicInfo::EmitAtomicLoadLibcall(llvm::Value *AddForLoaded,
                                       llvm::AtomicOrdering AO, bool) {
  // void __atomic_load(size_t size, void *mem, void *return, int order);
  CallArgList Args;
  Args.add(RValue::get(getAtomicSizeValue()), CGF.getContext().getSizeType());
  Args.add(RValue::get(CGF.EmitCastToVoidPtr(getAtomicPointer())),
           CGF.getContext().VoidPtrTy);
  Args.add(RValue::get(CGF.EmitCastToVoidPtr(AddForLoaded)),
           CGF.getContext().VoidPtrTy);
  Args.add(RValue::get(
               llvm::ConstantInt::get(CGF.IntTy, translateAtomicOrdering(AO))),
           CGF.getContext().IntTy);
  emitAtomicLibcall(CGF, "__atomic_load", CGF.getContext().VoidTy, Args);
}

llvm::Value *AtomicInfo::EmitAtomicLoadOp(llvm::AtomicOrdering AO,
                                          bool IsVolatile) {
  // Okay, we're doing this natively.
  Address Addr = getAtomicAddressAsAtomicIntPointer();
  llvm::LoadInst *Load = CGF.Builder.CreateLoad(Addr, "atomic-load");
  Load->setAtomic(AO);

  // Other decoration.
  if (IsVolatile)
    Load->setVolatile(true);
  if (LVal.getTBAAInfo())
    CGF.CGM.DecorateInstructionWithTBAA(Load, LVal.getTBAAInfo());
  return Load;
}

/// An LValue is a candidate for having its loads and stores be made atomic if
/// we are operating under /volatile:ms *and* the LValue itself is volatile and
/// such an operation can be performed without a libcall.
bool CodeGenFunction::LValueIsSuitableForInlineAtomic(LValue LV) {
  if (!CGM.getCodeGenOpts().MSVolatile) return false;
  AtomicInfo AI(*this, LV);
  bool IsVolatile = LV.isVolatile() || hasVolatileMember(LV.getType());
  // An atomic is inline if we don't need to use a libcall.
  bool AtomicIsInline = !AI.shouldUseLibcall();
  return IsVolatile && AtomicIsInline;
}

/// A type is a candidate for having its loads and stores be made atomic if
/// we are operating under /volatile:ms *and* we know the access is volatile
/// and such an operation can be performed without a libcall.
bool CodeGenFunction::typeIsSuitableForInlineAtomic(QualType Ty,
                                                    bool IsVolatile) const {
  // The operation must be volatile for us to make it atomic.
  if (!IsVolatile)
    return false;
  // The -fms-volatile flag must be passed for us to adopt this behavior.
  if (!CGM.getCodeGenOpts().MSVolatile)
    return false;

  // An atomic is inline if we don't need to use a libcall (e.g. it is builtin).
  if (!getContext().getTargetInfo().hasBuiltinAtomic(
          getContext().getTypeSize(Ty), getContext().getTypeAlign(Ty)))
    return false;

  // MSVC doesn't seem to do this for types wider than a pointer.
  if (getContext().getTypeSize(Ty) >
      getContext().getTypeSize(getContext().getIntPtrType()))
    return false;
  return true;
}

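// For example (a sketch of the /volatile:ms behavior these predicates feed):
// given 'volatile int g;', a plain load of 'g' can be emitted as an acquire
// atomic load and a plain store as a release atomic store by the wrappers
// below, when the callers' suitability checks above pass.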
RValue CodeGenFunction::EmitAtomicLoad(LValue LV, SourceLocation SL,
                                       AggValueSlot Slot) {
  llvm::AtomicOrdering AO;
  bool IsVolatile = LV.isVolatileQualified();
  if (LV.getType()->isAtomicType()) {
    AO = llvm::AtomicOrdering::SequentiallyConsistent;
  } else {
    AO = llvm::AtomicOrdering::Acquire;
    IsVolatile = true;
  }
  return EmitAtomicLoad(LV, SL, AO, IsVolatile, Slot);
}

RValue AtomicInfo::EmitAtomicLoad(AggValueSlot ResultSlot, SourceLocation Loc,
                                  bool AsValue, llvm::AtomicOrdering AO,
                                  bool IsVolatile) {
  // Check whether we should use a library call.
  if (shouldUseLibcall()) {
    Address TempAddr = Address::invalid();
    if (LVal.isSimple() && !ResultSlot.isIgnored()) {
      assert(getEvaluationKind() == TEK_Aggregate);
      TempAddr = ResultSlot.getAddress();
    } else
      TempAddr = CreateTempAlloca();

    EmitAtomicLoadLibcall(TempAddr.getPointer(), AO, IsVolatile);

    // Okay, turn that back into the original value or whole atomic (for
    // non-simple lvalues) type.
    return convertAtomicTempToRValue(TempAddr, ResultSlot, Loc, AsValue);
  }

  // Okay, we're doing this natively.
  auto *Load = EmitAtomicLoadOp(AO, IsVolatile);

  // If we're ignoring an aggregate return, don't do anything.
  if (getEvaluationKind() == TEK_Aggregate && ResultSlot.isIgnored())
    return RValue::getAggregate(Address::invalid(), false);

  // Okay, turn that back into the original value or atomic (for non-simple
  // lvalues) type.
  return ConvertIntToValueOrAtomic(Load, ResultSlot, Loc, AsValue);
}

/// Emit a load from an l-value of atomic type. Note that the r-value
/// we produce is an r-value of the atomic *value* type.
RValue CodeGenFunction::EmitAtomicLoad(LValue src, SourceLocation loc,
                                       llvm::AtomicOrdering AO, bool IsVolatile,
                                       AggValueSlot resultSlot) {
  AtomicInfo Atomics(*this, src);
  return Atomics.EmitAtomicLoad(resultSlot, loc, /*AsValue=*/true, AO,
                                IsVolatile);
}

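// A concrete example for the copy-in logic below (widths here are assumed,
// not computed): a 6-byte struct stored in an 8-byte atomic carries 2 bytes
// of padding; the memset-zero and projectValue() steps give that padding a
// defined bit-pattern before any atomic compare-exchange inspects it.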
/// Copy an r-value into memory as part of storing to an atomic type.
/// This needs to create a bit-pattern suitable for atomic operations.
void AtomicInfo::emitCopyIntoMemory(RValue rvalue) const {
  assert(LVal.isSimple());
  // If we have an r-value, the rvalue should be of the atomic type,
  // which means that the caller is responsible for having zeroed
  // any padding. Just do an aggregate copy of that type.
  if (rvalue.isAggregate()) {
    CGF.EmitAggregateCopy(getAtomicAddress(),
                          rvalue.getAggregateAddress(),
                          getAtomicType(),
                          (rvalue.isVolatileQualified()
                           || LVal.isVolatileQualified()));
    return;
  }

  // Okay, otherwise we're copying stuff.

  // Zero out the buffer if necessary.
  emitMemSetZeroIfNecessary();

  // Drill past the padding if present.
  LValue TempLVal = projectValue();

  // Okay, store the rvalue in.
  if (rvalue.isScalar()) {
    CGF.EmitStoreOfScalar(rvalue.getScalarVal(), TempLVal, /*init*/ true);
  } else {
    CGF.EmitStoreOfComplex(rvalue.getComplexVal(), TempLVal, /*init*/ true);
  }
}


/// Materialize an r-value into memory for the purposes of storing it
/// to an atomic type.
Address AtomicInfo::materializeRValue(RValue rvalue) const {
  // Aggregate r-values are already in memory, and EmitAtomicStore
  // requires them to be values of the atomic type.
  if (rvalue.isAggregate())
    return rvalue.getAggregateAddress();

  // Otherwise, make a temporary and materialize into it.
  LValue TempLV = CGF.MakeAddrLValue(CreateTempAlloca(), getAtomicType());
  AtomicInfo Atomics(CGF, TempLV);
  Atomics.emitCopyIntoMemory(rvalue);
  return TempLV.getAddress();
}

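// Sketch of the scalar fast path below (illustrative IR): a float r-value
// becomes 'bitcast float %v to i32' and a pointer becomes
// 'ptrtoint i8* %p to i64', avoiding a round-trip through a temporary alloca
// when the sizes line up.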
llvm::Value *AtomicInfo::convertRValueToInt(RValue RVal) const {
  // If we've got a scalar value of the right size, try to avoid going
  // through memory.
  if (RVal.isScalar() && (!hasPadding() || !LVal.isSimple())) {
    llvm::Value *Value = RVal.getScalarVal();
    if (isa<llvm::IntegerType>(Value->getType()))
      return CGF.EmitToMemory(Value, ValueTy);
    else {
      llvm::IntegerType *InputIntTy = llvm::IntegerType::get(
          CGF.getLLVMContext(),
          LVal.isSimple() ? getValueSizeInBits() : getAtomicSizeInBits());
      if (isa<llvm::PointerType>(Value->getType()))
        return CGF.Builder.CreatePtrToInt(Value, InputIntTy);
      else if (llvm::BitCastInst::isBitCastable(Value->getType(), InputIntTy))
        return CGF.Builder.CreateBitCast(Value, InputIntTy);
    }
  }
  // Otherwise, we need to go through memory.
  // Put the r-value in memory.
  Address Addr = materializeRValue(RVal);

  // Cast the temporary to the atomic int type and pull a value out.
  Addr = emitCastToAtomicIntPointer(Addr);
  return CGF.Builder.CreateLoad(Addr);
}

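// Illustrative IR for the native path below, assuming a 32-bit atomic (the
// orderings are placeholders; the real ones come from the caller):
//   %pair = cmpxchg i32* %obj, i32 %expected, i32 %desired acq_rel acquire
//   %old  = extractvalue { i32, i1 } %pair, 0
//   %ok   = extractvalue { i32, i1 } %pair, 1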
std::pair<llvm::Value *, llvm::Value *> AtomicInfo::EmitAtomicCompareExchangeOp(
    llvm::Value *ExpectedVal, llvm::Value *DesiredVal,
    llvm::AtomicOrdering Success, llvm::AtomicOrdering Failure, bool IsWeak) {
  // Do the atomic compare-and-exchange.
  Address Addr = getAtomicAddressAsAtomicIntPointer();
  auto *Inst = CGF.Builder.CreateAtomicCmpXchg(Addr.getPointer(),
                                               ExpectedVal, DesiredVal,
                                               Success, Failure);
  // Other decoration.
  Inst->setVolatile(LVal.isVolatileQualified());
  Inst->setWeak(IsWeak);

  // Okay, turn that back into the original value type.
  auto *PreviousVal = CGF.Builder.CreateExtractValue(Inst, /*Idxs=*/0);
  auto *SuccessFailureVal = CGF.Builder.CreateExtractValue(Inst, /*Idxs=*/1);
  return std::make_pair(PreviousVal, SuccessFailureVal);
}

llvm::Value *
AtomicInfo::EmitAtomicCompareExchangeLibcall(llvm::Value *ExpectedAddr,
                                             llvm::Value *DesiredAddr,
                                             llvm::AtomicOrdering Success,
                                             llvm::AtomicOrdering Failure) {
  // bool __atomic_compare_exchange(size_t size, void *obj, void *expected,
  // void *desired, int success, int failure);
  CallArgList Args;
  Args.add(RValue::get(getAtomicSizeValue()), CGF.getContext().getSizeType());
  Args.add(RValue::get(CGF.EmitCastToVoidPtr(getAtomicPointer())),
           CGF.getContext().VoidPtrTy);
  Args.add(RValue::get(CGF.EmitCastToVoidPtr(ExpectedAddr)),
           CGF.getContext().VoidPtrTy);
  Args.add(RValue::get(CGF.EmitCastToVoidPtr(DesiredAddr)),
           CGF.getContext().VoidPtrTy);
  Args.add(RValue::get(llvm::ConstantInt::get(
               CGF.IntTy, translateAtomicOrdering(Success))),
           CGF.getContext().IntTy);
  Args.add(RValue::get(llvm::ConstantInt::get(
               CGF.IntTy, translateAtomicOrdering(Failure))),
           CGF.getContext().IntTy);
  auto SuccessFailureRVal = emitAtomicLibcall(CGF, "__atomic_compare_exchange",
                                              CGF.getContext().BoolTy, Args);

  return SuccessFailureRVal.getScalarVal();
}

std::pair<RValue, llvm::Value *> AtomicInfo::EmitAtomicCompareExchange(
    RValue Expected, RValue Desired, llvm::AtomicOrdering Success,
    llvm::AtomicOrdering Failure, bool IsWeak) {
  if (Failure >= Success)
    // Don't assert on undefined behavior.
    Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(Success);

  // Check whether we should use a library call.
  if (shouldUseLibcall()) {
    // Produce a source address.
    Address ExpectedAddr = materializeRValue(Expected);
    Address DesiredAddr = materializeRValue(Desired);
    auto *Res = EmitAtomicCompareExchangeLibcall(ExpectedAddr.getPointer(),
                                                 DesiredAddr.getPointer(),
                                                 Success, Failure);
    return std::make_pair(
        convertAtomicTempToRValue(ExpectedAddr, AggValueSlot::ignored(),
                                  SourceLocation(), /*AsValue=*/false),
        Res);
  }

  // If we've got a scalar value of the right size, try to avoid going
  // through memory.
  auto *ExpectedVal = convertRValueToInt(Expected);
  auto *DesiredVal = convertRValueToInt(Desired);
  auto Res = EmitAtomicCompareExchangeOp(ExpectedVal, DesiredVal, Success,
                                         Failure, IsWeak);
  return std::make_pair(
      ConvertIntToValueOrAtomic(Res.first, AggValueSlot::ignored(),
                                SourceLocation(), /*AsValue=*/false),
      Res.second);
}

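// The update helpers below all follow the same compare-and-swap loop; in
// pseudocode (a sketch, not the emitted IR):
//   old = atomic_load(obj, AO);
//   do {
//     desired = UpdateOp(old);
//   } while (!compare_exchange(obj, &old, desired, AO, Failure));
// EmitAtomicUpdateValue only builds the 'desired' value for one iteration.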
static void
EmitAtomicUpdateValue(CodeGenFunction &CGF, AtomicInfo &Atomics, RValue OldRVal,
                      const llvm::function_ref<RValue(RValue)> &UpdateOp,
                      Address DesiredAddr) {
  RValue UpRVal;
  LValue AtomicLVal = Atomics.getAtomicLValue();
  LValue DesiredLVal;
  if (AtomicLVal.isSimple()) {
    UpRVal = OldRVal;
    DesiredLVal = CGF.MakeAddrLValue(DesiredAddr, AtomicLVal.getType());
  } else {
    // Build a new lvalue for the temp address.
    Address Ptr = Atomics.materializeRValue(OldRVal);
    LValue UpdateLVal;
    if (AtomicLVal.isBitField()) {
      UpdateLVal =
          LValue::MakeBitfield(Ptr, AtomicLVal.getBitFieldInfo(),
                               AtomicLVal.getType(),
                               AtomicLVal.getAlignmentSource());
      DesiredLVal =
          LValue::MakeBitfield(DesiredAddr, AtomicLVal.getBitFieldInfo(),
                               AtomicLVal.getType(),
                               AtomicLVal.getAlignmentSource());
    } else if (AtomicLVal.isVectorElt()) {
      UpdateLVal = LValue::MakeVectorElt(Ptr, AtomicLVal.getVectorIdx(),
                                         AtomicLVal.getType(),
                                         AtomicLVal.getAlignmentSource());
      DesiredLVal = LValue::MakeVectorElt(
          DesiredAddr, AtomicLVal.getVectorIdx(), AtomicLVal.getType(),
          AtomicLVal.getAlignmentSource());
    } else {
      assert(AtomicLVal.isExtVectorElt());
      UpdateLVal = LValue::MakeExtVectorElt(Ptr, AtomicLVal.getExtVectorElts(),
                                            AtomicLVal.getType(),
                                            AtomicLVal.getAlignmentSource());
      DesiredLVal = LValue::MakeExtVectorElt(
          DesiredAddr, AtomicLVal.getExtVectorElts(), AtomicLVal.getType(),
          AtomicLVal.getAlignmentSource());
    }
    UpdateLVal.setTBAAInfo(AtomicLVal.getTBAAInfo());
    DesiredLVal.setTBAAInfo(AtomicLVal.getTBAAInfo());
    UpRVal = CGF.EmitLoadOfLValue(UpdateLVal, SourceLocation());
  }
  // Store the new value in the corresponding memory area.
  RValue NewRVal = UpdateOp(UpRVal);
  if (NewRVal.isScalar()) {
    CGF.EmitStoreThroughLValue(NewRVal, DesiredLVal);
  } else {
    assert(NewRVal.isComplex());
    CGF.EmitStoreOfComplex(NewRVal.getComplexVal(), DesiredLVal,
                           /*isInit=*/false);
  }
}

void AtomicInfo::EmitAtomicUpdateLibcall(
    llvm::AtomicOrdering AO, const llvm::function_ref<RValue(RValue)> &UpdateOp,
    bool IsVolatile) {
  auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);

  Address ExpectedAddr = CreateTempAlloca();

  EmitAtomicLoadLibcall(ExpectedAddr.getPointer(), AO, IsVolatile);
  auto *ContBB = CGF.createBasicBlock("atomic_cont");
  auto *ExitBB = CGF.createBasicBlock("atomic_exit");
  CGF.EmitBlock(ContBB);
  Address DesiredAddr = CreateTempAlloca();
  if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) ||
      requiresMemSetZero(getAtomicAddress().getElementType())) {
    auto *OldVal = CGF.Builder.CreateLoad(ExpectedAddr);
    CGF.Builder.CreateStore(OldVal, DesiredAddr);
  }
  auto OldRVal = convertAtomicTempToRValue(ExpectedAddr,
                                           AggValueSlot::ignored(),
                                           SourceLocation(), /*AsValue=*/false);
  EmitAtomicUpdateValue(CGF, *this, OldRVal, UpdateOp, DesiredAddr);
  auto *Res =
      EmitAtomicCompareExchangeLibcall(ExpectedAddr.getPointer(),
                                       DesiredAddr.getPointer(),
                                       AO, Failure);
  CGF.Builder.CreateCondBr(Res, ExitBB, ContBB);
  CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
}

void AtomicInfo::EmitAtomicUpdateOp(
    llvm::AtomicOrdering AO, const llvm::function_ref<RValue(RValue)> &UpdateOp,
    bool IsVolatile) {
  auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);

  // Do the atomic load.
  auto *OldVal = EmitAtomicLoadOp(AO, IsVolatile);
  // For non-simple lvalues, perform a compare-and-swap loop.
  auto *ContBB = CGF.createBasicBlock("atomic_cont");
  auto *ExitBB = CGF.createBasicBlock("atomic_exit");
  auto *CurBB = CGF.Builder.GetInsertBlock();
  CGF.EmitBlock(ContBB);
  llvm::PHINode *PHI = CGF.Builder.CreatePHI(OldVal->getType(),
                                             /*NumReservedValues=*/2);
  PHI->addIncoming(OldVal, CurBB);
  Address NewAtomicAddr = CreateTempAlloca();
  Address NewAtomicIntAddr = emitCastToAtomicIntPointer(NewAtomicAddr);
  if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) ||
      requiresMemSetZero(getAtomicAddress().getElementType())) {
    CGF.Builder.CreateStore(PHI, NewAtomicIntAddr);
  }
  auto OldRVal = ConvertIntToValueOrAtomic(PHI, AggValueSlot::ignored(),
                                           SourceLocation(), /*AsValue=*/false);
  EmitAtomicUpdateValue(CGF, *this, OldRVal, UpdateOp, NewAtomicAddr);
  auto *DesiredVal = CGF.Builder.CreateLoad(NewAtomicIntAddr);
  // Try to write the new value using a cmpxchg operation.
  auto Res = EmitAtomicCompareExchangeOp(PHI, DesiredVal, AO, Failure);
  PHI->addIncoming(Res.first, CGF.Builder.GetInsertBlock());
  CGF.Builder.CreateCondBr(Res.second, ExitBB, ContBB);
  CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
}

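// The overloads below mirror the function_ref-based helpers above but take an
// already-computed scalar RValue; they are used when a plain value is stored
// into a non-simple atomic l-value (a bit-field or vector element, as reached
// e.g. from OpenMP atomic constructs), where the surrounding bits must be
// preserved by the same compare-and-swap loop.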
static void EmitAtomicUpdateValue(CodeGenFunction &CGF, AtomicInfo &Atomics,
                                  RValue UpdateRVal, Address DesiredAddr) {
  LValue AtomicLVal = Atomics.getAtomicLValue();
  LValue DesiredLVal;
  // Build a new lvalue for the temp address.
  if (AtomicLVal.isBitField()) {
    DesiredLVal =
        LValue::MakeBitfield(DesiredAddr, AtomicLVal.getBitFieldInfo(),
                             AtomicLVal.getType(),
                             AtomicLVal.getAlignmentSource());
  } else if (AtomicLVal.isVectorElt()) {
    DesiredLVal =
        LValue::MakeVectorElt(DesiredAddr, AtomicLVal.getVectorIdx(),
                              AtomicLVal.getType(),
                              AtomicLVal.getAlignmentSource());
  } else {
    assert(AtomicLVal.isExtVectorElt());
    DesiredLVal = LValue::MakeExtVectorElt(
        DesiredAddr, AtomicLVal.getExtVectorElts(), AtomicLVal.getType(),
        AtomicLVal.getAlignmentSource());
  }
  DesiredLVal.setTBAAInfo(AtomicLVal.getTBAAInfo());
  // Store the new value in the corresponding memory area.
  assert(UpdateRVal.isScalar());
  CGF.EmitStoreThroughLValue(UpdateRVal, DesiredLVal);
}

void AtomicInfo::EmitAtomicUpdateLibcall(llvm::AtomicOrdering AO,
                                         RValue UpdateRVal, bool IsVolatile) {
  auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);

  Address ExpectedAddr = CreateTempAlloca();

  EmitAtomicLoadLibcall(ExpectedAddr.getPointer(), AO, IsVolatile);
  auto *ContBB = CGF.createBasicBlock("atomic_cont");
  auto *ExitBB = CGF.createBasicBlock("atomic_exit");
  CGF.EmitBlock(ContBB);
  Address DesiredAddr = CreateTempAlloca();
  if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) ||
      requiresMemSetZero(getAtomicAddress().getElementType())) {
    auto *OldVal = CGF.Builder.CreateLoad(ExpectedAddr);
    CGF.Builder.CreateStore(OldVal, DesiredAddr);
  }
  EmitAtomicUpdateValue(CGF, *this, UpdateRVal, DesiredAddr);
  auto *Res =
      EmitAtomicCompareExchangeLibcall(ExpectedAddr.getPointer(),
                                       DesiredAddr.getPointer(),
                                       AO, Failure);
  CGF.Builder.CreateCondBr(Res, ExitBB, ContBB);
  CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
}

void AtomicInfo::EmitAtomicUpdateOp(llvm::AtomicOrdering AO, RValue UpdateRVal,
                                    bool IsVolatile) {
  auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);

  // Do the atomic load.
  auto *OldVal = EmitAtomicLoadOp(AO, IsVolatile);
  // For non-simple lvalues, perform a compare-and-swap loop.
  auto *ContBB = CGF.createBasicBlock("atomic_cont");
  auto *ExitBB = CGF.createBasicBlock("atomic_exit");
  auto *CurBB = CGF.Builder.GetInsertBlock();
  CGF.EmitBlock(ContBB);
  llvm::PHINode *PHI = CGF.Builder.CreatePHI(OldVal->getType(),
                                             /*NumReservedValues=*/2);
  PHI->addIncoming(OldVal, CurBB);
  Address NewAtomicAddr = CreateTempAlloca();
  Address NewAtomicIntAddr = emitCastToAtomicIntPointer(NewAtomicAddr);
  if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) ||
      requiresMemSetZero(getAtomicAddress().getElementType())) {
    CGF.Builder.CreateStore(PHI, NewAtomicIntAddr);
  }
  EmitAtomicUpdateValue(CGF, *this, UpdateRVal, NewAtomicAddr);
  auto *DesiredVal = CGF.Builder.CreateLoad(NewAtomicIntAddr);
  // Try to write the new value using a cmpxchg operation.
  auto Res = EmitAtomicCompareExchangeOp(PHI, DesiredVal, AO, Failure);
  PHI->addIncoming(Res.first, CGF.Builder.GetInsertBlock());
  CGF.Builder.CreateCondBr(Res.second, ExitBB, ContBB);
  CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
}

void AtomicInfo::EmitAtomicUpdate(
    llvm::AtomicOrdering AO, const llvm::function_ref<RValue(RValue)> &UpdateOp,
    bool IsVolatile) {
  if (shouldUseLibcall()) {
    EmitAtomicUpdateLibcall(AO, UpdateOp, IsVolatile);
  } else {
    EmitAtomicUpdateOp(AO, UpdateOp, IsVolatile);
  }
}

void AtomicInfo::EmitAtomicUpdate(llvm::AtomicOrdering AO, RValue UpdateRVal,
                                  bool IsVolatile) {
  if (shouldUseLibcall()) {
    EmitAtomicUpdateLibcall(AO, UpdateRVal, IsVolatile);
  } else {
    EmitAtomicUpdateOp(AO, UpdateRVal, IsVolatile);
  }
}

void CodeGenFunction::EmitAtomicStore(RValue rvalue, LValue lvalue,
                                      bool isInit) {
  bool IsVolatile = lvalue.isVolatileQualified();
  llvm::AtomicOrdering AO;
  if (lvalue.getType()->isAtomicType()) {
    AO = llvm::AtomicOrdering::SequentiallyConsistent;
  } else {
    AO = llvm::AtomicOrdering::Release;
    IsVolatile = true;
  }
  return EmitAtomicStore(rvalue, lvalue, AO, IsVolatile, isInit);
}

/// Emit a store to an l-value of atomic type.
///
/// Note that the r-value is expected to be an r-value *of the atomic
/// type*; this means that for aggregate r-values, it should include
/// storage for any padding that was necessary.
void CodeGenFunction::EmitAtomicStore(RValue rvalue, LValue dest,
                                      llvm::AtomicOrdering AO, bool IsVolatile,
                                      bool isInit) {
  // If this is an aggregate r-value, it should agree in type except
  // maybe for address-space qualification.
  assert(!rvalue.isAggregate() ||
         rvalue.getAggregateAddress().getElementType()
           == dest.getAddress().getElementType());

  AtomicInfo atomics(*this, dest);
  LValue LVal = atomics.getAtomicLValue();

  // If this is an initialization, just put the value there normally.
  if (LVal.isSimple()) {
    if (isInit) {
      atomics.emitCopyIntoMemory(rvalue);
      return;
    }

    // Check whether we should use a library call.
    if (atomics.shouldUseLibcall()) {
      // Produce a source address.
      Address srcAddr = atomics.materializeRValue(rvalue);

      // void __atomic_store(size_t size, void *mem, void *val, int order)
      CallArgList args;
      args.add(RValue::get(atomics.getAtomicSizeValue()),
               getContext().getSizeType());
      args.add(RValue::get(EmitCastToVoidPtr(atomics.getAtomicPointer())),
               getContext().VoidPtrTy);
      args.add(RValue::get(EmitCastToVoidPtr(srcAddr.getPointer())),
               getContext().VoidPtrTy);
      args.add(RValue::get(llvm::ConstantInt::get(
                   IntTy, AtomicInfo::translateAtomicOrdering(AO))),
               getContext().IntTy);
      emitAtomicLibcall(*this, "__atomic_store", getContext().VoidTy, args);
      return;
    }

    // Okay, we're doing this natively.
    llvm::Value *intValue = atomics.convertRValueToInt(rvalue);

    // Do the atomic store.
    Address addr =
        atomics.emitCastToAtomicIntPointer(atomics.getAtomicAddress());
    intValue = Builder.CreateIntCast(
        intValue, addr.getElementType(), /*isSigned=*/false);
    llvm::StoreInst *store = Builder.CreateStore(intValue, addr);

    // Initializations don't need to be atomic.
    if (!isInit)
      store->setAtomic(AO);

    // Other decoration.
    if (IsVolatile)
      store->setVolatile(true);
    if (dest.getTBAAInfo())
      CGM.DecorateInstructionWithTBAA(store, dest.getTBAAInfo());
    return;
  }

  // Emit a simple atomic update operation.
  atomics.EmitAtomicUpdate(AO, rvalue, IsVolatile);
}

/// Emit a compare-and-exchange op for an atomic type.
///
std::pair<RValue, llvm::Value *> CodeGenFunction::EmitAtomicCompareExchange(
    LValue Obj, RValue Expected, RValue Desired, SourceLocation Loc,
    llvm::AtomicOrdering Success, llvm::AtomicOrdering Failure, bool IsWeak,
    AggValueSlot Slot) {
  // If this is an aggregate r-value, it should agree in type except
  // maybe for address-space qualification.
  assert(!Expected.isAggregate() ||
         Expected.getAggregateAddress().getElementType() ==
             Obj.getAddress().getElementType());
  assert(!Desired.isAggregate() ||
         Desired.getAggregateAddress().getElementType() ==
             Obj.getAddress().getElementType());
  AtomicInfo Atomics(*this, Obj);

  return Atomics.EmitAtomicCompareExchange(Expected, Desired, Success, Failure,
                                           IsWeak);
}

void CodeGenFunction::EmitAtomicUpdate(
    LValue LVal, llvm::AtomicOrdering AO,
    const llvm::function_ref<RValue(RValue)> &UpdateOp, bool IsVolatile) {
  AtomicInfo Atomics(*this, LVal);
  Atomics.EmitAtomicUpdate(AO, UpdateOp, IsVolatile);
}

void CodeGenFunction::EmitAtomicInit(Expr *init, LValue dest) {
  AtomicInfo atomics(*this, dest);

  switch (atomics.getEvaluationKind()) {
  case TEK_Scalar: {
    llvm::Value *value = EmitScalarExpr(init);
    atomics.emitCopyIntoMemory(RValue::get(value));
    return;
  }

  case TEK_Complex: {
    ComplexPairTy value = EmitComplexExpr(init);
    atomics.emitCopyIntoMemory(RValue::getComplex(value));
    return;
  }

  case TEK_Aggregate: {
    // Fix up the destination if the initializer isn't an expression
    // of atomic type.
    bool Zeroed = false;
    if (!init->getType()->isAtomicType()) {
      Zeroed = atomics.emitMemSetZeroIfNecessary();
      dest = atomics.projectValue();
    }

    // Evaluate the expression directly into the destination.
    AggValueSlot slot = AggValueSlot::forLValue(dest,
                                        AggValueSlot::IsNotDestructed,
                                        AggValueSlot::DoesNotNeedGCBarriers,
                                        AggValueSlot::IsNotAliased,
                                        Zeroed ? AggValueSlot::IsZeroed :
                                                 AggValueSlot::IsNotZeroed);

    EmitAggExpr(init, slot);
    return;
  }
  }
  llvm_unreachable("bad evaluation kind");
}