//===--- CGAtomic.cpp - Emit LLVM IR for atomic operations ----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the code for emitting atomic operations.
//
//===----------------------------------------------------------------------===//

#include "CodeGenFunction.h"
#include "CGCall.h"
#include "CGRecordLayout.h"
#include "CodeGenModule.h"
#include "clang/AST/ASTContext.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Operator.h"

using namespace clang;
using namespace CodeGen;

namespace {
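  /// Gathers everything needed to lower operations on one atomic object:
  /// the value and atomic types, their sizes and alignments, the (possibly
  /// widened) l-value, and whether the operation must go through a libcall.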
  class AtomicInfo {
    CodeGenFunction &CGF;
    QualType AtomicTy;
    QualType ValueTy;
    uint64_t AtomicSizeInBits;
    uint64_t ValueSizeInBits;
    CharUnits AtomicAlign;
    CharUnits ValueAlign;
    CharUnits LValueAlign;
    TypeEvaluationKind EvaluationKind;
    bool UseLibcall;
    LValue LVal;
    CGBitFieldInfo BFI;
  public:
    AtomicInfo(CodeGenFunction &CGF, LValue &lvalue)
        : CGF(CGF), AtomicSizeInBits(0), ValueSizeInBits(0),
          EvaluationKind(TEK_Scalar), UseLibcall(true) {
      assert(!lvalue.isGlobalReg());
      ASTContext &C = CGF.getContext();
      if (lvalue.isSimple()) {
        AtomicTy = lvalue.getType();
        if (auto *ATy = AtomicTy->getAs<AtomicType>())
          ValueTy = ATy->getValueType();
        else
          ValueTy = AtomicTy;
        EvaluationKind = CGF.getEvaluationKind(ValueTy);

        uint64_t ValueAlignInBits;
        uint64_t AtomicAlignInBits;
        TypeInfo ValueTI = C.getTypeInfo(ValueTy);
        ValueSizeInBits = ValueTI.Width;
        ValueAlignInBits = ValueTI.Align;

        TypeInfo AtomicTI = C.getTypeInfo(AtomicTy);
        AtomicSizeInBits = AtomicTI.Width;
        AtomicAlignInBits = AtomicTI.Align;

        assert(ValueSizeInBits <= AtomicSizeInBits);
        assert(ValueAlignInBits <= AtomicAlignInBits);

        AtomicAlign = C.toCharUnitsFromBits(AtomicAlignInBits);
        ValueAlign = C.toCharUnitsFromBits(ValueAlignInBits);
        if (lvalue.getAlignment().isZero())
          lvalue.setAlignment(AtomicAlign);

        LVal = lvalue;
      } else if (lvalue.isBitField()) {
        ValueTy = lvalue.getType();
        ValueSizeInBits = C.getTypeSize(ValueTy);
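        // Round the covered region up to a whole number of alignment units so
        // the bitfield can be accessed as one aligned integer: e.g. with
        // 4-byte alignment, a bitfield occupying bits 3..18 of its storage is
        // widened to a single 32-bit load/store.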
        auto &OrigBFI = lvalue.getBitFieldInfo();
        auto Offset = OrigBFI.Offset % C.toBits(lvalue.getAlignment());
        AtomicSizeInBits = C.toBits(
            C.toCharUnitsFromBits(Offset + OrigBFI.Size + C.getCharWidth() - 1)
                .RoundUpToAlignment(lvalue.getAlignment()));
        auto VoidPtrAddr = CGF.EmitCastToVoidPtr(lvalue.getBitFieldAddr());
        auto OffsetInChars =
            (C.toCharUnitsFromBits(OrigBFI.Offset) / lvalue.getAlignment()) *
            lvalue.getAlignment();
        VoidPtrAddr = CGF.Builder.CreateConstGEP1_64(
            VoidPtrAddr, OffsetInChars.getQuantity());
        auto Addr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
            VoidPtrAddr,
            CGF.Builder.getIntNTy(AtomicSizeInBits)->getPointerTo(),
            "atomic_bitfield_base");
        BFI = OrigBFI;
        BFI.Offset = Offset;
        BFI.StorageSize = AtomicSizeInBits;
        LVal = LValue::MakeBitfield(Addr, BFI, lvalue.getType(),
                                    lvalue.getAlignment());
        LVal.setTBAAInfo(lvalue.getTBAAInfo());
        AtomicTy = C.getIntTypeForBitwidth(AtomicSizeInBits, OrigBFI.IsSigned);
        if (AtomicTy.isNull()) {
          llvm::APInt Size(
              /*numBits=*/32,
              C.toCharUnitsFromBits(AtomicSizeInBits).getQuantity());
          AtomicTy = C.getConstantArrayType(C.CharTy, Size, ArrayType::Normal,
                                            /*IndexTypeQuals=*/0);
        }
        AtomicAlign = ValueAlign = lvalue.getAlignment();
      } else if (lvalue.isVectorElt()) {
        ValueTy = lvalue.getType()->getAs<VectorType>()->getElementType();
        ValueSizeInBits = C.getTypeSize(ValueTy);
        AtomicTy = lvalue.getType();
        AtomicSizeInBits = C.getTypeSize(AtomicTy);
        AtomicAlign = ValueAlign = lvalue.getAlignment();
        LVal = lvalue;
      } else {
        assert(lvalue.isExtVectorElt());
        ValueTy = lvalue.getType();
        ValueSizeInBits = C.getTypeSize(ValueTy);
        AtomicTy = ValueTy = CGF.getContext().getExtVectorType(
            lvalue.getType(), lvalue.getExtVectorAddr()
                                  ->getType()
                                  ->getPointerElementType()
                                  ->getVectorNumElements());
        AtomicSizeInBits = C.getTypeSize(AtomicTy);
        AtomicAlign = ValueAlign = lvalue.getAlignment();
        LVal = lvalue;
      }
      UseLibcall = !C.getTargetInfo().hasBuiltinAtomic(
          AtomicSizeInBits, C.toBits(lvalue.getAlignment()));
    }

    QualType getAtomicType() const { return AtomicTy; }
    QualType getValueType() const { return ValueTy; }
    CharUnits getAtomicAlignment() const { return AtomicAlign; }
    CharUnits getValueAlignment() const { return ValueAlign; }
    uint64_t getAtomicSizeInBits() const { return AtomicSizeInBits; }
    uint64_t getValueSizeInBits() const { return ValueSizeInBits; }
    TypeEvaluationKind getEvaluationKind() const { return EvaluationKind; }
    bool shouldUseLibcall() const { return UseLibcall; }
    const LValue &getAtomicLValue() const { return LVal; }
    llvm::Value *getAtomicAddress() const {
      if (LVal.isSimple())
        return LVal.getAddress();
      else if (LVal.isBitField())
        return LVal.getBitFieldAddr();
      else if (LVal.isVectorElt())
        return LVal.getVectorAddr();
      assert(LVal.isExtVectorElt());
      return LVal.getExtVectorAddr();
    }

    /// Is the atomic size larger than the underlying value type?
    ///
    /// Note that the absence of padding does not mean that atomic
    /// objects are completely interchangeable with non-atomic
    /// objects: we might have promoted the alignment of a type
    /// without making it bigger.
    bool hasPadding() const {
      return (ValueSizeInBits != AtomicSizeInBits);
    }

    bool emitMemSetZeroIfNecessary() const;

    llvm::Value *getAtomicSizeValue() const {
      CharUnits size = CGF.getContext().toCharUnitsFromBits(AtomicSizeInBits);
      return CGF.CGM.getSize(size);
    }

    /// Cast the given pointer to an integer pointer suitable for
    /// atomic operations.
    llvm::Value *emitCastToAtomicIntPointer(llvm::Value *addr) const;

    /// Turn an atomic-layout object into an r-value.
    RValue convertTempToRValue(llvm::Value *addr, AggValueSlot resultSlot,
                               SourceLocation loc, bool AsValue) const;

    /// \brief Converts an r-value to an integer value.
    llvm::Value *convertRValueToInt(RValue RVal) const;

    RValue ConvertIntToValueOrAtomic(llvm::Value *IntVal,
                                     AggValueSlot ResultSlot,
                                     SourceLocation Loc, bool AsValue) const;

    /// Copy an atomic r-value into atomic-layout memory.
    void emitCopyIntoMemory(RValue rvalue) const;

    /// Project an l-value down to the value field.
    LValue projectValue() const {
      assert(LVal.isSimple());
      llvm::Value *addr = getAtomicAddress();
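      // A padded atomic object is laid out as { value, padding }, so the
      // value is simply field 0.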
      if (hasPadding())
        addr = CGF.Builder.CreateStructGEP(addr, 0);

      return LValue::MakeAddr(addr, getValueType(), LVal.getAlignment(),
                              CGF.getContext(), LVal.getTBAAInfo());
    }

    /// \brief Emits atomic load.
    /// \returns Loaded value.
    RValue EmitAtomicLoad(AggValueSlot ResultSlot, SourceLocation Loc,
                          bool AsValue, llvm::AtomicOrdering AO,
                          bool IsVolatile);

    /// \brief Emits atomic compare-and-exchange sequence.
    /// \param Expected Expected value.
    /// \param Desired Desired value.
    /// \param Success Atomic ordering for success operation.
    /// \param Failure Atomic ordering for failed operation.
    /// \param IsWeak true if atomic operation is weak, false otherwise.
    /// \returns Pair of values: previous value from storage (value type) and
    /// boolean flag (i1 type) with true if success and false otherwise.
    std::pair<llvm::Value *, llvm::Value *> EmitAtomicCompareExchange(
        RValue Expected, RValue Desired,
        llvm::AtomicOrdering Success = llvm::SequentiallyConsistent,
        llvm::AtomicOrdering Failure = llvm::SequentiallyConsistent,
        bool IsWeak = false);

    /// Materialize an atomic r-value in atomic-layout memory.
    llvm::Value *materializeRValue(RValue rvalue) const;

    /// \brief Translates LLVM atomic ordering to GNU atomic ordering for
    /// libcalls.
    static AtomicExpr::AtomicOrderingKind
    translateAtomicOrdering(const llvm::AtomicOrdering AO);

  private:
    bool requiresMemSetZero(llvm::Type *type) const;

    /// \brief Creates temp alloca for intermediate operations on atomic value.
    llvm::Value *CreateTempAlloca() const;

    /// \brief Emits atomic load as a libcall.
    void EmitAtomicLoadLibcall(llvm::Value *AddrForLoaded,
                               llvm::AtomicOrdering AO, bool IsVolatile);
    /// \brief Emits atomic load as LLVM instruction.
    llvm::Value *EmitAtomicLoadOp(llvm::AtomicOrdering AO, bool IsVolatile);
    /// \brief Emits atomic compare-and-exchange op as a libcall.
    std::pair<llvm::Value *, llvm::Value *> EmitAtomicCompareExchangeLibcall(
        llvm::Value *ExpectedAddr, llvm::Value *DesiredAddr,
        llvm::AtomicOrdering Success = llvm::SequentiallyConsistent,
        llvm::AtomicOrdering Failure = llvm::SequentiallyConsistent);
    /// \brief Emits atomic compare-and-exchange op as LLVM instruction.
    std::pair<llvm::Value *, llvm::Value *> EmitAtomicCompareExchangeOp(
        llvm::Value *Expected, llvm::Value *Desired,
        llvm::AtomicOrdering Success = llvm::SequentiallyConsistent,
        llvm::AtomicOrdering Failure = llvm::SequentiallyConsistent,
        bool IsWeak = false);
  };
}

AtomicExpr::AtomicOrderingKind
AtomicInfo::translateAtomicOrdering(const llvm::AtomicOrdering AO) {
  switch (AO) {
  case llvm::Unordered:
  case llvm::NotAtomic:
  case llvm::Monotonic:
    return AtomicExpr::AO_ABI_memory_order_relaxed;
  case llvm::Acquire:
    return AtomicExpr::AO_ABI_memory_order_acquire;
  case llvm::Release:
    return AtomicExpr::AO_ABI_memory_order_release;
  case llvm::AcquireRelease:
    return AtomicExpr::AO_ABI_memory_order_acq_rel;
  case llvm::SequentiallyConsistent:
    return AtomicExpr::AO_ABI_memory_order_seq_cst;
  }
  llvm_unreachable("Unhandled AtomicOrdering");
}

llvm::Value *AtomicInfo::CreateTempAlloca() const {
  auto *TempAlloca = CGF.CreateMemTemp(
      (LVal.isBitField() && ValueSizeInBits > AtomicSizeInBits) ? ValueTy
                                                                : AtomicTy,
      "atomic-temp");
  TempAlloca->setAlignment(getAtomicAlignment().getQuantity());
  // Cast to pointer to value type for bitfields.
  if (LVal.isBitField())
    return CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
        TempAlloca, getAtomicAddress()->getType());
  return TempAlloca;
}

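/// Emit a call to one of the generic or size-suffixed __atomic_* library
/// routines, built as a free function call with the default C calling
/// convention.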
static RValue emitAtomicLibcall(CodeGenFunction &CGF,
                                StringRef fnName,
                                QualType resultType,
                                CallArgList &args) {
  const CGFunctionInfo &fnInfo =
    CGF.CGM.getTypes().arrangeFreeFunctionCall(resultType, args,
            FunctionType::ExtInfo(), RequiredArgs::All);
  llvm::FunctionType *fnTy = CGF.CGM.getTypes().GetFunctionType(fnInfo);
  llvm::Constant *fn = CGF.CGM.CreateRuntimeFunction(fnTy, fnName);
  return CGF.EmitCall(fnInfo, fn, ReturnValueSlot(), args);
}

/// Does a store of the given IR type modify the full expected width?
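/// For example, on x86-64 an f80 long double occupies 16 bytes but stores
/// only 10 of them, so it is not "full size" for a 128-bit atomic.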
static bool isFullSizeType(CodeGenModule &CGM, llvm::Type *type,
                           uint64_t expectedSize) {
  return (CGM.getDataLayout().getTypeStoreSize(type) * 8 == expectedSize);
}

/// Does the atomic type require memsetting to zero before initialization?
///
/// The IR type is provided as a way of making certain queries faster.
bool AtomicInfo::requiresMemSetZero(llvm::Type *type) const {
  // If the atomic type has size padding, we definitely need a memset.
  if (hasPadding()) return true;

  // Otherwise, do some simple heuristics to try to avoid it:
  switch (getEvaluationKind()) {
  // For scalars and complexes, check whether the store size of the
  // type uses the full size.
  case TEK_Scalar:
    return !isFullSizeType(CGF.CGM, type, AtomicSizeInBits);
  case TEK_Complex:
    return !isFullSizeType(CGF.CGM, type->getStructElementType(0),
                           AtomicSizeInBits / 2);

  // Padding in structs has an undefined bit pattern.  User beware.
  case TEK_Aggregate:
    return false;
  }
  llvm_unreachable("bad evaluation kind");
}

bool AtomicInfo::emitMemSetZeroIfNecessary() const {
  assert(LVal.isSimple());
  llvm::Value *addr = LVal.getAddress();
  if (!requiresMemSetZero(addr->getType()->getPointerElementType()))
    return false;

  CGF.Builder.CreateMemSet(
      addr, llvm::ConstantInt::get(CGF.Int8Ty, 0),
      CGF.getContext().toCharUnitsFromBits(AtomicSizeInBits).getQuantity(),
      LVal.getAlignment().getQuantity());
  return true;
}

static void emitAtomicCmpXchg(CodeGenFunction &CGF, AtomicExpr *E, bool IsWeak,
                              llvm::Value *Dest, llvm::Value *Ptr,
                              llvm::Value *Val1, llvm::Value *Val2,
                              uint64_t Size, unsigned Align,
                              llvm::AtomicOrdering SuccessOrder,
                              llvm::AtomicOrdering FailureOrder) {
  // Load the expected and desired values; the weak flag is applied to the
  // cmpxchg instruction itself below.
  llvm::LoadInst *Expected = CGF.Builder.CreateLoad(Val1);
  Expected->setAlignment(Align);
  llvm::LoadInst *Desired = CGF.Builder.CreateLoad(Val2);
  Desired->setAlignment(Align);

  llvm::AtomicCmpXchgInst *Pair = CGF.Builder.CreateAtomicCmpXchg(
      Ptr, Expected, Desired, SuccessOrder, FailureOrder);
  Pair->setVolatile(E->isVolatile());
  Pair->setWeak(IsWeak);

  // Cmp holds the result of the compare-exchange operation: true on success,
  // false on failure.
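  // Roughly, for a strong 32-bit exchange this emits:
  //   %pair = cmpxchg i32* %ptr, i32 %expected, i32 %desired acquire monotonic
  //   %old  = extractvalue { i32, i1 } %pair, 0
  //   %ok   = extractvalue { i32, i1 } %pair, 1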
  llvm::Value *Old = CGF.Builder.CreateExtractValue(Pair, 0);
  llvm::Value *Cmp = CGF.Builder.CreateExtractValue(Pair, 1);

  // This basic block is used to hold the store instruction if the operation
  // failed.
  llvm::BasicBlock *StoreExpectedBB =
      CGF.createBasicBlock("cmpxchg.store_expected", CGF.CurFn);

  // This basic block is the exit point of the operation, we should end up
  // here regardless of whether or not the operation succeeded.
  llvm::BasicBlock *ContinueBB =
      CGF.createBasicBlock("cmpxchg.continue", CGF.CurFn);

  // Update Expected if Expected isn't equal to Old, otherwise branch to the
  // exit point.
  CGF.Builder.CreateCondBr(Cmp, ContinueBB, StoreExpectedBB);

  CGF.Builder.SetInsertPoint(StoreExpectedBB);
  // Update the memory at Expected with Old's value.
  llvm::StoreInst *StoreExpected = CGF.Builder.CreateStore(Old, Val1);
  StoreExpected->setAlignment(Align);
  // Finally, branch to the exit point.
  CGF.Builder.CreateBr(ContinueBB);

  CGF.Builder.SetInsertPoint(ContinueBB);
  // Update the memory at Dest with Cmp's value.
  CGF.EmitStoreOfScalar(Cmp, CGF.MakeAddrLValue(Dest, E->getType()));
  return;
}

/// Given an ordering required on success, emit all possible cmpxchg
/// instructions to cope with the provided (but possibly only dynamically known)
/// FailureOrder.
static void emitAtomicCmpXchgFailureSet(CodeGenFunction &CGF, AtomicExpr *E,
                                        bool IsWeak, llvm::Value *Dest,
                                        llvm::Value *Ptr, llvm::Value *Val1,
                                        llvm::Value *Val2,
                                        llvm::Value *FailureOrderVal,
                                        uint64_t Size, unsigned Align,
                                        llvm::AtomicOrdering SuccessOrder) {
  llvm::AtomicOrdering FailureOrder;
  if (llvm::ConstantInt *FO = dyn_cast<llvm::ConstantInt>(FailureOrderVal)) {
    switch (FO->getSExtValue()) {
    default:
      FailureOrder = llvm::Monotonic;
      break;
    case AtomicExpr::AO_ABI_memory_order_consume:
    case AtomicExpr::AO_ABI_memory_order_acquire:
      FailureOrder = llvm::Acquire;
      break;
    case AtomicExpr::AO_ABI_memory_order_seq_cst:
      FailureOrder = llvm::SequentiallyConsistent;
      break;
    }
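    // C11 requires the failure ordering to be no stronger than the success
    // ordering (and never release or acq_rel), so clamp anything stronger.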
    if (FailureOrder >= SuccessOrder) {
      // Don't assert on undefined behaviour.
      FailureOrder =
          llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(SuccessOrder);
    }
    emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, Size, Align,
                      SuccessOrder, FailureOrder);
    return;
  }

  // Create all the relevant BB's
  llvm::BasicBlock *MonotonicBB = nullptr, *AcquireBB = nullptr,
                   *SeqCstBB = nullptr;
  MonotonicBB = CGF.createBasicBlock("monotonic_fail", CGF.CurFn);
  if (SuccessOrder != llvm::Monotonic && SuccessOrder != llvm::Release)
    AcquireBB = CGF.createBasicBlock("acquire_fail", CGF.CurFn);
  if (SuccessOrder == llvm::SequentiallyConsistent)
    SeqCstBB = CGF.createBasicBlock("seqcst_fail", CGF.CurFn);

  llvm::BasicBlock *ContBB = CGF.createBasicBlock("atomic.continue", CGF.CurFn);

  llvm::SwitchInst *SI = CGF.Builder.CreateSwitch(FailureOrderVal, MonotonicBB);

  // Emit all the different atomics

  // MonotonicBB is arbitrarily chosen as the default case; in practice, this
  // doesn't matter unless someone is crazy enough to use something that
  // doesn't fold to a constant for the ordering.
  CGF.Builder.SetInsertPoint(MonotonicBB);
  emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2,
                    Size, Align, SuccessOrder, llvm::Monotonic);
  CGF.Builder.CreateBr(ContBB);

  if (AcquireBB) {
    CGF.Builder.SetInsertPoint(AcquireBB);
    emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2,
                      Size, Align, SuccessOrder, llvm::Acquire);
    CGF.Builder.CreateBr(ContBB);
    SI->addCase(CGF.Builder.getInt32(AtomicExpr::AO_ABI_memory_order_consume),
                AcquireBB);
    SI->addCase(CGF.Builder.getInt32(AtomicExpr::AO_ABI_memory_order_acquire),
                AcquireBB);
  }
  if (SeqCstBB) {
    CGF.Builder.SetInsertPoint(SeqCstBB);
    emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2,
                      Size, Align, SuccessOrder, llvm::SequentiallyConsistent);
    CGF.Builder.CreateBr(ContBB);
    SI->addCase(CGF.Builder.getInt32(AtomicExpr::AO_ABI_memory_order_seq_cst),
                SeqCstBB);
  }

  CGF.Builder.SetInsertPoint(ContBB);
}

static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, llvm::Value *Dest,
                         llvm::Value *Ptr, llvm::Value *Val1, llvm::Value *Val2,
                         llvm::Value *IsWeak, llvm::Value *FailureOrder,
                         uint64_t Size, unsigned Align,
                         llvm::AtomicOrdering Order) {
  llvm::AtomicRMWInst::BinOp Op = llvm::AtomicRMWInst::Add;
  llvm::Instruction::BinaryOps PostOp = (llvm::Instruction::BinaryOps)0;

  switch (E->getOp()) {
  case AtomicExpr::AO__c11_atomic_init:
    llvm_unreachable("Already handled!");

  case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
    emitAtomicCmpXchgFailureSet(CGF, E, false, Dest, Ptr, Val1, Val2,
                                FailureOrder, Size, Align, Order);
    return;
  case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
    emitAtomicCmpXchgFailureSet(CGF, E, true, Dest, Ptr, Val1, Val2,
                                FailureOrder, Size, Align, Order);
    return;
  case AtomicExpr::AO__atomic_compare_exchange:
  case AtomicExpr::AO__atomic_compare_exchange_n: {
    if (llvm::ConstantInt *IsWeakC = dyn_cast<llvm::ConstantInt>(IsWeak)) {
      emitAtomicCmpXchgFailureSet(CGF, E, IsWeakC->getZExtValue(), Dest, Ptr,
                                  Val1, Val2, FailureOrder, Size, Align, Order);
    } else {
      // Create all the relevant BB's
      llvm::BasicBlock *StrongBB =
          CGF.createBasicBlock("cmpxchg.strong", CGF.CurFn);
      llvm::BasicBlock *WeakBB =
          CGF.createBasicBlock("cmpxchg.weak", CGF.CurFn);
      llvm::BasicBlock *ContBB =
          CGF.createBasicBlock("cmpxchg.continue", CGF.CurFn);

      llvm::SwitchInst *SI = CGF.Builder.CreateSwitch(IsWeak, WeakBB);
      SI->addCase(CGF.Builder.getInt1(false), StrongBB);

      CGF.Builder.SetInsertPoint(StrongBB);
      emitAtomicCmpXchgFailureSet(CGF, E, false, Dest, Ptr, Val1, Val2,
                                  FailureOrder, Size, Align, Order);
      CGF.Builder.CreateBr(ContBB);

      CGF.Builder.SetInsertPoint(WeakBB);
      emitAtomicCmpXchgFailureSet(CGF, E, true, Dest, Ptr, Val1, Val2,
                                  FailureOrder, Size, Align, Order);
      CGF.Builder.CreateBr(ContBB);

      CGF.Builder.SetInsertPoint(ContBB);
    }
    return;
  }
  case AtomicExpr::AO__c11_atomic_load:
  case AtomicExpr::AO__atomic_load_n:
  case AtomicExpr::AO__atomic_load: {
    llvm::LoadInst *Load = CGF.Builder.CreateLoad(Ptr);
    Load->setAtomic(Order);
    Load->setAlignment(Size);
    Load->setVolatile(E->isVolatile());
    llvm::StoreInst *StoreDest = CGF.Builder.CreateStore(Load, Dest);
    StoreDest->setAlignment(Align);
    return;
  }

  case AtomicExpr::AO__c11_atomic_store:
  case AtomicExpr::AO__atomic_store:
  case AtomicExpr::AO__atomic_store_n: {
    assert(!Dest && "Store does not return a value");
    llvm::LoadInst *LoadVal1 = CGF.Builder.CreateLoad(Val1);
    LoadVal1->setAlignment(Align);
    llvm::StoreInst *Store = CGF.Builder.CreateStore(LoadVal1, Ptr);
    Store->setAtomic(Order);
    Store->setAlignment(Size);
    Store->setVolatile(E->isVolatile());
    return;
  }

  case AtomicExpr::AO__c11_atomic_exchange:
  case AtomicExpr::AO__atomic_exchange_n:
  case AtomicExpr::AO__atomic_exchange:
    Op = llvm::AtomicRMWInst::Xchg;
    break;

  case AtomicExpr::AO__atomic_add_fetch:
    PostOp = llvm::Instruction::Add;
    // Fall through.
  case AtomicExpr::AO__c11_atomic_fetch_add:
  case AtomicExpr::AO__atomic_fetch_add:
    Op = llvm::AtomicRMWInst::Add;
    break;

  case AtomicExpr::AO__atomic_sub_fetch:
    PostOp = llvm::Instruction::Sub;
    // Fall through.
  case AtomicExpr::AO__c11_atomic_fetch_sub:
  case AtomicExpr::AO__atomic_fetch_sub:
    Op = llvm::AtomicRMWInst::Sub;
    break;

  case AtomicExpr::AO__atomic_and_fetch:
    PostOp = llvm::Instruction::And;
    // Fall through.
  case AtomicExpr::AO__c11_atomic_fetch_and:
  case AtomicExpr::AO__atomic_fetch_and:
    Op = llvm::AtomicRMWInst::And;
    break;

  case AtomicExpr::AO__atomic_or_fetch:
    PostOp = llvm::Instruction::Or;
    // Fall through.
  case AtomicExpr::AO__c11_atomic_fetch_or:
  case AtomicExpr::AO__atomic_fetch_or:
    Op = llvm::AtomicRMWInst::Or;
    break;

  case AtomicExpr::AO__atomic_xor_fetch:
    PostOp = llvm::Instruction::Xor;
    // Fall through.
  case AtomicExpr::AO__c11_atomic_fetch_xor:
  case AtomicExpr::AO__atomic_fetch_xor:
    Op = llvm::AtomicRMWInst::Xor;
    break;

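  // LLVM's atomicrmw nand computes ~(old & val); for __atomic_nand_fetch the
  // And post-op plus the CreateNot below rebuild that stored value.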
  case AtomicExpr::AO__atomic_nand_fetch:
    PostOp = llvm::Instruction::And;
    // Fall through.
  case AtomicExpr::AO__atomic_fetch_nand:
    Op = llvm::AtomicRMWInst::Nand;
    break;
  }

  llvm::LoadInst *LoadVal1 = CGF.Builder.CreateLoad(Val1);
  LoadVal1->setAlignment(Align);
  llvm::AtomicRMWInst *RMWI =
      CGF.Builder.CreateAtomicRMW(Op, Ptr, LoadVal1, Order);
  RMWI->setVolatile(E->isVolatile());

  // For __atomic_*_fetch operations, perform the operation again to
  // determine the value which was written.
  llvm::Value *Result = RMWI;
  if (PostOp)
    Result = CGF.Builder.CreateBinOp(PostOp, RMWI, LoadVal1);
  if (E->getOp() == AtomicExpr::AO__atomic_nand_fetch)
    Result = CGF.Builder.CreateNot(Result);
  llvm::StoreInst *StoreDest = CGF.Builder.CreateStore(Result, Dest);
  StoreDest->setAlignment(Align);
}

// This function emits any expression (scalar, complex, or aggregate)
// into a temporary alloca.
static llvm::Value *
EmitValToTemp(CodeGenFunction &CGF, Expr *E) {
  llvm::Value *DeclPtr = CGF.CreateMemTemp(E->getType(), ".atomictmp");
  CGF.EmitAnyExprToMem(E, DeclPtr, E->getType().getQualifiers(),
                       /*Init*/ true);
  return DeclPtr;
}

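/// Add the value operand for an atomic libcall: for the optimized "_N"
/// variants the value is loaded and passed as a same-sized integer; the
/// generic variants instead take the value's address as a void pointer.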
static void
AddDirectArgument(CodeGenFunction &CGF, CallArgList &Args,
                  bool UseOptimizedLibcall, llvm::Value *Val, QualType ValTy,
                  SourceLocation Loc, CharUnits SizeInChars) {
  if (UseOptimizedLibcall) {
    // Load value and pass it to the function directly.
    unsigned Align = CGF.getContext().getTypeAlignInChars(ValTy).getQuantity();
    int64_t SizeInBits = CGF.getContext().toBits(SizeInChars);
    ValTy =
        CGF.getContext().getIntTypeForBitwidth(SizeInBits, /*Signed=*/false);
    llvm::Type *IPtrTy = llvm::IntegerType::get(CGF.getLLVMContext(),
                                                SizeInBits)->getPointerTo();
    Val = CGF.EmitLoadOfScalar(CGF.Builder.CreateBitCast(Val, IPtrTy), false,
                               Align, CGF.getContext().getPointerType(ValTy),
                               Loc);
    // Coerce the value into an appropriately sized integer type.
    Args.add(RValue::get(Val), ValTy);
  } else {
    // Non-optimized functions always take a reference.
    Args.add(RValue::get(CGF.EmitCastToVoidPtr(Val)),
             CGF.getContext().VoidPtrTy);
  }
}

RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) {
  QualType AtomicTy = E->getPtr()->getType()->getPointeeType();
  QualType MemTy = AtomicTy;
  if (const AtomicType *AT = AtomicTy->getAs<AtomicType>())
    MemTy = AT->getValueType();
  CharUnits sizeChars = getContext().getTypeSizeInChars(AtomicTy);
  uint64_t Size = sizeChars.getQuantity();
  CharUnits alignChars = getContext().getTypeAlignInChars(AtomicTy);
  unsigned Align = alignChars.getQuantity();
  unsigned MaxInlineWidthInBits =
    getTarget().getMaxAtomicInlineWidth();
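  // If the atomic's size doesn't match its alignment, or it is wider than the
  // target's inline limit, lower it through the __atomic_* library calls.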
  bool UseLibcall = (Size != Align ||
                     getContext().toBits(sizeChars) > MaxInlineWidthInBits);

  llvm::Value *IsWeak = nullptr, *OrderFail = nullptr, *Val1 = nullptr,
              *Val2 = nullptr;
  llvm::Value *Ptr = EmitScalarExpr(E->getPtr());

  if (E->getOp() == AtomicExpr::AO__c11_atomic_init) {
    assert(!Dest && "Init does not return a value");
    LValue lvalue = LValue::MakeAddr(Ptr, AtomicTy, alignChars, getContext());
    EmitAtomicInit(E->getVal1(), lvalue);
    return RValue::get(nullptr);
  }

  llvm::Value *Order = EmitScalarExpr(E->getOrder());

  switch (E->getOp()) {
  case AtomicExpr::AO__c11_atomic_init:
    llvm_unreachable("Already handled!");

  case AtomicExpr::AO__c11_atomic_load:
  case AtomicExpr::AO__atomic_load_n:
    break;

  case AtomicExpr::AO__atomic_load:
    Dest = EmitScalarExpr(E->getVal1());
    break;

  case AtomicExpr::AO__atomic_store:
    Val1 = EmitScalarExpr(E->getVal1());
    break;

  case AtomicExpr::AO__atomic_exchange:
    Val1 = EmitScalarExpr(E->getVal1());
    Dest = EmitScalarExpr(E->getVal2());
    break;

  case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
  case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
  case AtomicExpr::AO__atomic_compare_exchange_n:
  case AtomicExpr::AO__atomic_compare_exchange:
    Val1 = EmitScalarExpr(E->getVal1());
    if (E->getOp() == AtomicExpr::AO__atomic_compare_exchange)
      Val2 = EmitScalarExpr(E->getVal2());
    else
      Val2 = EmitValToTemp(*this, E->getVal2());
    OrderFail = EmitScalarExpr(E->getOrderFail());
    if (E->getNumSubExprs() == 6)
      IsWeak = EmitScalarExpr(E->getWeak());
    break;

  case AtomicExpr::AO__c11_atomic_fetch_add:
  case AtomicExpr::AO__c11_atomic_fetch_sub:
    if (MemTy->isPointerType()) {
      // For pointer arithmetic, we're required to do a bit of math:
      // adding 1 to an int* is not the same as adding 1 to a uintptr_t.
      // ... but only for the C11 builtins. The GNU builtins expect the
      // user to multiply by sizeof(T).
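      // For example, __c11_atomic_fetch_add on an _Atomic(int *) scales the
      // addend by sizeof(int) before the underlying fetch-add.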
      QualType Val1Ty = E->getVal1()->getType();
      llvm::Value *Val1Scalar = EmitScalarExpr(E->getVal1());
      CharUnits PointeeIncAmt =
          getContext().getTypeSizeInChars(MemTy->getPointeeType());
      Val1Scalar = Builder.CreateMul(Val1Scalar, CGM.getSize(PointeeIncAmt));
      Val1 = CreateMemTemp(Val1Ty, ".atomictmp");
      EmitStoreOfScalar(Val1Scalar, MakeAddrLValue(Val1, Val1Ty));
      break;
    }
    // Fall through.
  case AtomicExpr::AO__atomic_fetch_add:
  case AtomicExpr::AO__atomic_fetch_sub:
  case AtomicExpr::AO__atomic_add_fetch:
  case AtomicExpr::AO__atomic_sub_fetch:
  case AtomicExpr::AO__c11_atomic_store:
  case AtomicExpr::AO__c11_atomic_exchange:
  case AtomicExpr::AO__atomic_store_n:
  case AtomicExpr::AO__atomic_exchange_n:
  case AtomicExpr::AO__c11_atomic_fetch_and:
  case AtomicExpr::AO__c11_atomic_fetch_or:
  case AtomicExpr::AO__c11_atomic_fetch_xor:
  case AtomicExpr::AO__atomic_fetch_and:
  case AtomicExpr::AO__atomic_fetch_or:
  case AtomicExpr::AO__atomic_fetch_xor:
  case AtomicExpr::AO__atomic_fetch_nand:
  case AtomicExpr::AO__atomic_and_fetch:
  case AtomicExpr::AO__atomic_or_fetch:
  case AtomicExpr::AO__atomic_xor_fetch:
  case AtomicExpr::AO__atomic_nand_fetch:
    Val1 = EmitValToTemp(*this, E->getVal1());
    break;
  }

  QualType RValTy = E->getType().getUnqualifiedType();

  auto GetDest = [&] {
    if (!RValTy->isVoidType() && !Dest) {
      Dest = CreateMemTemp(RValTy, ".atomicdst");
    }
    return Dest;
  };

  // Use a library call.  See: http://gcc.gnu.org/wiki/Atomic/GCCMM/LIbrary .
  if (UseLibcall) {
    bool UseOptimizedLibcall = false;
    switch (E->getOp()) {
    case AtomicExpr::AO__c11_atomic_fetch_add:
    case AtomicExpr::AO__atomic_fetch_add:
    case AtomicExpr::AO__c11_atomic_fetch_and:
    case AtomicExpr::AO__atomic_fetch_and:
    case AtomicExpr::AO__c11_atomic_fetch_or:
    case AtomicExpr::AO__atomic_fetch_or:
    case AtomicExpr::AO__c11_atomic_fetch_sub:
    case AtomicExpr::AO__atomic_fetch_sub:
    case AtomicExpr::AO__c11_atomic_fetch_xor:
    case AtomicExpr::AO__atomic_fetch_xor:
      // For these, only library calls for certain sizes exist.
      UseOptimizedLibcall = true;
      break;
    default:
      // Only use optimized library calls for sizes for which they exist.
      if (Size == 1 || Size == 2 || Size == 4 || Size == 8)
        UseOptimizedLibcall = true;
      break;
    }

    CallArgList Args;
    if (!UseOptimizedLibcall) {
      // For non-optimized library calls, the size is the first parameter
      Args.add(RValue::get(llvm::ConstantInt::get(SizeTy, Size)),
               getContext().getSizeType());
    }
    // Atomic address is the first or second parameter
    Args.add(RValue::get(EmitCastToVoidPtr(Ptr)), getContext().VoidPtrTy);

    std::string LibCallName;
    QualType LoweredMemTy =
        MemTy->isPointerType() ? getContext().getIntPtrType() : MemTy;
    QualType RetTy;
    bool HaveRetTy = false;
    switch (E->getOp()) {
    // There is only one libcall for compare and exchange, because there is no
    // optimisation benefit possible from a libcall version of a weak compare
    // and exchange.
    // bool __atomic_compare_exchange(size_t size, void *mem, void *expected,
    //                                void *desired, int success, int failure)
    // bool __atomic_compare_exchange_N(T *mem, T *expected, T desired,
    //                                  int success, int failure)
    case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
    case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
    case AtomicExpr::AO__atomic_compare_exchange:
    case AtomicExpr::AO__atomic_compare_exchange_n:
      LibCallName = "__atomic_compare_exchange";
      RetTy = getContext().BoolTy;
      HaveRetTy = true;
      Args.add(RValue::get(EmitCastToVoidPtr(Val1)), getContext().VoidPtrTy);
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val2, MemTy,
                        E->getExprLoc(), sizeChars);
      Args.add(RValue::get(Order), getContext().IntTy);
      Order = OrderFail;
      break;
    // void __atomic_exchange(size_t size, void *mem, void *val, void *return,
    //                        int order)
    // T __atomic_exchange_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_exchange:
    case AtomicExpr::AO__atomic_exchange_n:
    case AtomicExpr::AO__atomic_exchange:
      LibCallName = "__atomic_exchange";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
                        E->getExprLoc(), sizeChars);
      break;
    // void __atomic_store(size_t size, void *mem, void *val, int order)
    // void __atomic_store_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_store:
    case AtomicExpr::AO__atomic_store:
    case AtomicExpr::AO__atomic_store_n:
      LibCallName = "__atomic_store";
      RetTy = getContext().VoidTy;
      HaveRetTy = true;
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
                        E->getExprLoc(), sizeChars);
      break;
    // void __atomic_load(size_t size, void *mem, void *return, int order)
    // T __atomic_load_N(T *mem, int order)
    case AtomicExpr::AO__c11_atomic_load:
    case AtomicExpr::AO__atomic_load:
    case AtomicExpr::AO__atomic_load_n:
      LibCallName = "__atomic_load";
      break;
    // T __atomic_fetch_add_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_fetch_add:
    case AtomicExpr::AO__atomic_fetch_add:
      LibCallName = "__atomic_fetch_add";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, LoweredMemTy,
                        E->getExprLoc(), sizeChars);
      break;
    // T __atomic_fetch_and_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_fetch_and:
    case AtomicExpr::AO__atomic_fetch_and:
      LibCallName = "__atomic_fetch_and";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
                        E->getExprLoc(), sizeChars);
      break;
    // T __atomic_fetch_or_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_fetch_or:
    case AtomicExpr::AO__atomic_fetch_or:
      LibCallName = "__atomic_fetch_or";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
                        E->getExprLoc(), sizeChars);
      break;
    // T __atomic_fetch_sub_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_fetch_sub:
    case AtomicExpr::AO__atomic_fetch_sub:
      LibCallName = "__atomic_fetch_sub";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, LoweredMemTy,
                        E->getExprLoc(), sizeChars);
      break;
    // T __atomic_fetch_xor_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_fetch_xor:
    case AtomicExpr::AO__atomic_fetch_xor:
      LibCallName = "__atomic_fetch_xor";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
                        E->getExprLoc(), sizeChars);
      break;
    default: return EmitUnsupportedRValue(E, "atomic library call");
    }

    // Optimized functions have the size in their name.
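    // e.g. "__atomic_fetch_add" becomes "__atomic_fetch_add_4" for a
    // four-byte operand.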
    if (UseOptimizedLibcall)
      LibCallName += "_" + llvm::utostr(Size);
    // By default, assume we return a value of the atomic type.
    if (!HaveRetTy) {
      if (UseOptimizedLibcall) {
        // Value is returned directly.
        // The function returns an appropriately sized integer type.
        RetTy = getContext().getIntTypeForBitwidth(
            getContext().toBits(sizeChars), /*Signed=*/false);
      } else {
        // Value is returned through parameter before the order.
        RetTy = getContext().VoidTy;
        Args.add(RValue::get(EmitCastToVoidPtr(Dest)), getContext().VoidPtrTy);
      }
    }
    // order is always the last parameter
    Args.add(RValue::get(Order),
             getContext().IntTy);

    RValue Res = emitAtomicLibcall(*this, LibCallName, RetTy, Args);
    // The value is returned directly from the libcall.
    if (HaveRetTy && !RetTy->isVoidType())
      return Res;
    // The value is returned via an explicit out param.
    if (RetTy->isVoidType())
      return RValue::get(nullptr);
    // The value is returned directly for optimized libcalls but the caller
    // expects it in an out-param.
    if (UseOptimizedLibcall) {
      llvm::Value *ResVal = Res.getScalarVal();
      llvm::StoreInst *StoreDest = Builder.CreateStore(
          ResVal,
          Builder.CreateBitCast(GetDest(), ResVal->getType()->getPointerTo()));
      StoreDest->setAlignment(Align);
    }
    return convertTempToRValue(Dest, RValTy, E->getExprLoc());
  }

  bool IsStore = E->getOp() == AtomicExpr::AO__c11_atomic_store ||
                 E->getOp() == AtomicExpr::AO__atomic_store ||
                 E->getOp() == AtomicExpr::AO__atomic_store_n;
  bool IsLoad = E->getOp() == AtomicExpr::AO__c11_atomic_load ||
                E->getOp() == AtomicExpr::AO__atomic_load ||
                E->getOp() == AtomicExpr::AO__atomic_load_n;

  llvm::Type *ITy =
      llvm::IntegerType::get(getLLVMContext(), Size * 8);
  llvm::Value *OrigDest = GetDest();
  Ptr = Builder.CreateBitCast(
      Ptr, ITy->getPointerTo(Ptr->getType()->getPointerAddressSpace()));
  if (Val1) Val1 = Builder.CreateBitCast(Val1, ITy->getPointerTo());
  if (Val2) Val2 = Builder.CreateBitCast(Val2, ITy->getPointerTo());
  if (Dest && !E->isCmpXChg())
    Dest = Builder.CreateBitCast(Dest, ITy->getPointerTo());

  if (isa<llvm::ConstantInt>(Order)) {
    int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
    switch (ord) {
    case AtomicExpr::AO_ABI_memory_order_relaxed:
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
                   Size, Align, llvm::Monotonic);
      break;
    case AtomicExpr::AO_ABI_memory_order_consume:
    case AtomicExpr::AO_ABI_memory_order_acquire:
      if (IsStore)
        break; // Avoid crashing on code with undefined behavior
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
                   Size, Align, llvm::Acquire);
      break;
    case AtomicExpr::AO_ABI_memory_order_release:
      if (IsLoad)
        break; // Avoid crashing on code with undefined behavior
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
                   Size, Align, llvm::Release);
      break;
    case AtomicExpr::AO_ABI_memory_order_acq_rel:
      if (IsLoad || IsStore)
        break; // Avoid crashing on code with undefined behavior
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
                   Size, Align, llvm::AcquireRelease);
      break;
    case AtomicExpr::AO_ABI_memory_order_seq_cst:
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
                   Size, Align, llvm::SequentiallyConsistent);
      break;
    default: // invalid order
      // We should not ever get here normally, but it's hard to
      // enforce that in general.
      break;
    }
    if (RValTy->isVoidType())
      return RValue::get(nullptr);
    return convertTempToRValue(OrigDest, RValTy, E->getExprLoc());
  }

  // Long case, when Order isn't obviously constant.

  // Create all the relevant BB's
  llvm::BasicBlock *MonotonicBB = nullptr, *AcquireBB = nullptr,
                   *ReleaseBB = nullptr, *AcqRelBB = nullptr,
                   *SeqCstBB = nullptr;
  MonotonicBB = createBasicBlock("monotonic", CurFn);
  if (!IsStore)
    AcquireBB = createBasicBlock("acquire", CurFn);
  if (!IsLoad)
    ReleaseBB = createBasicBlock("release", CurFn);
  if (!IsLoad && !IsStore)
    AcqRelBB = createBasicBlock("acqrel", CurFn);
  SeqCstBB = createBasicBlock("seqcst", CurFn);
  llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);

  // Create the switch for the split
  // MonotonicBB is arbitrarily chosen as the default case; in practice, this
  // doesn't matter unless someone is crazy enough to use something that
  // doesn't fold to a constant for the ordering.
  Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
  llvm::SwitchInst *SI = Builder.CreateSwitch(Order, MonotonicBB);

  // Emit all the different atomics
  Builder.SetInsertPoint(MonotonicBB);
  EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
               Size, Align, llvm::Monotonic);
  Builder.CreateBr(ContBB);
  if (!IsStore) {
    Builder.SetInsertPoint(AcquireBB);
    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
                 Size, Align, llvm::Acquire);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32(AtomicExpr::AO_ABI_memory_order_consume),
                AcquireBB);
    SI->addCase(Builder.getInt32(AtomicExpr::AO_ABI_memory_order_acquire),
                AcquireBB);
  }
  if (!IsLoad) {
    Builder.SetInsertPoint(ReleaseBB);
    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
                 Size, Align, llvm::Release);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32(AtomicExpr::AO_ABI_memory_order_release),
                ReleaseBB);
  }
  if (!IsLoad && !IsStore) {
    Builder.SetInsertPoint(AcqRelBB);
    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
                 Size, Align, llvm::AcquireRelease);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32(AtomicExpr::AO_ABI_memory_order_acq_rel),
                AcqRelBB);
  }
  Builder.SetInsertPoint(SeqCstBB);
  EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
               Size, Align, llvm::SequentiallyConsistent);
  Builder.CreateBr(ContBB);
  SI->addCase(Builder.getInt32(AtomicExpr::AO_ABI_memory_order_seq_cst),
              SeqCstBB);

  // Cleanup and return
  Builder.SetInsertPoint(ContBB);
  if (RValTy->isVoidType())
    return RValue::get(nullptr);
  return convertTempToRValue(OrigDest, RValTy, E->getExprLoc());
}

llvm::Value *AtomicInfo::emitCastToAtomicIntPointer(llvm::Value *addr) const {
  unsigned addrspace =
      cast<llvm::PointerType>(addr->getType())->getAddressSpace();
  llvm::IntegerType *ty =
      llvm::IntegerType::get(CGF.getLLVMContext(), AtomicSizeInBits);
  return CGF.Builder.CreateBitCast(addr, ty->getPointerTo(addrspace));
}

RValue AtomicInfo::convertTempToRValue(llvm::Value *addr,
                                       AggValueSlot resultSlot,
                                       SourceLocation loc, bool AsValue) const {
  if (LVal.isSimple()) {
    if (EvaluationKind == TEK_Aggregate)
      return resultSlot.asRValue();

    // Drill into the padding structure if we have one.
    if (hasPadding())
      addr = CGF.Builder.CreateStructGEP(addr, 0);

    // Otherwise, just convert the temporary to an r-value using the
    // normal conversion routine.
    return CGF.convertTempToRValue(addr, getValueType(), loc);
  } else if (!AsValue)
    // Get RValue from temp memory as atomic for non-simple lvalues
    return RValue::get(
        CGF.Builder.CreateAlignedLoad(addr, AtomicAlign.getQuantity()));
  else if (LVal.isBitField())
    return CGF.EmitLoadOfBitfieldLValue(LValue::MakeBitfield(
        addr, LVal.getBitFieldInfo(), LVal.getType(), LVal.getAlignment()));
  else if (LVal.isVectorElt())
    return CGF.EmitLoadOfLValue(LValue::MakeVectorElt(addr, LVal.getVectorIdx(),
                                                      LVal.getType(),
                                                      LVal.getAlignment()),
                                loc);
  assert(LVal.isExtVectorElt());
  return CGF.EmitLoadOfExtVectorElementLValue(LValue::MakeExtVectorElt(
      addr, LVal.getExtVectorElts(), LVal.getType(), LVal.getAlignment()));
}

RValue AtomicInfo::ConvertIntToValueOrAtomic(llvm::Value *IntVal,
                                             AggValueSlot ResultSlot,
                                             SourceLocation Loc,
                                             bool AsValue) const {
  // Try to avoid going through a temporary in the easy cases.
  assert(IntVal->getType()->isIntegerTy() && "Expected integer value");
  if (getEvaluationKind() == TEK_Scalar &&
      (((!LVal.isBitField() ||
         LVal.getBitFieldInfo().Size == ValueSizeInBits) &&
        !hasPadding()) ||
       !AsValue)) {
    auto *ValTy = AsValue
                      ? CGF.ConvertTypeForMem(ValueTy)
                      : getAtomicAddress()->getType()->getPointerElementType();
    if (ValTy->isIntegerTy()) {
      assert(IntVal->getType() == ValTy && "Different integer types.");
      return RValue::get(CGF.EmitFromMemory(IntVal, ValueTy));
    } else if (ValTy->isPointerTy())
      return RValue::get(CGF.Builder.CreateIntToPtr(IntVal, ValTy));
    else if (llvm::CastInst::isBitCastable(IntVal->getType(), ValTy))
      return RValue::get(CGF.Builder.CreateBitCast(IntVal, ValTy));
  }

  // Create a temporary.  This needs to be big enough to hold the
  // atomic integer.
  llvm::Value *Temp;
  bool TempIsVolatile = false;
  CharUnits TempAlignment;
  if (AsValue && getEvaluationKind() == TEK_Aggregate) {
    assert(!ResultSlot.isIgnored());
    Temp = ResultSlot.getAddr();
    TempAlignment = getValueAlignment();
    TempIsVolatile = ResultSlot.isVolatile();
  } else {
    Temp = CreateTempAlloca();
    TempAlignment = getAtomicAlignment();
  }

  // Slam the integer into the temporary.
  llvm::Value *CastTemp = emitCastToAtomicIntPointer(Temp);
  CGF.Builder.CreateAlignedStore(IntVal, CastTemp, TempAlignment.getQuantity())
      ->setVolatile(TempIsVolatile);

  return convertTempToRValue(Temp, ResultSlot, Loc, AsValue);
}

void AtomicInfo::EmitAtomicLoadLibcall(llvm::Value *AddrForLoaded,
                                       llvm::AtomicOrdering AO, bool) {
  // void __atomic_load(size_t size, void *mem, void *return, int order);
  CallArgList Args;
  Args.add(RValue::get(getAtomicSizeValue()), CGF.getContext().getSizeType());
  Args.add(RValue::get(CGF.EmitCastToVoidPtr(getAtomicAddress())),
           CGF.getContext().VoidPtrTy);
  Args.add(RValue::get(CGF.EmitCastToVoidPtr(AddrForLoaded)),
           CGF.getContext().VoidPtrTy);
  Args.add(RValue::get(
               llvm::ConstantInt::get(CGF.IntTy, translateAtomicOrdering(AO))),
           CGF.getContext().IntTy);
  emitAtomicLibcall(CGF, "__atomic_load", CGF.getContext().VoidTy, Args);
}

llvm::Value *AtomicInfo::EmitAtomicLoadOp(llvm::AtomicOrdering AO,
                                          bool IsVolatile) {
  // Okay, we're doing this natively.
  llvm::Value *Addr = emitCastToAtomicIntPointer(getAtomicAddress());
  llvm::LoadInst *Load = CGF.Builder.CreateLoad(Addr, "atomic-load");
  Load->setAtomic(AO);

  // Other decoration.
  Load->setAlignment(getAtomicAlignment().getQuantity());
  if (IsVolatile)
    Load->setVolatile(true);
  if (LVal.getTBAAInfo())
    CGF.CGM.DecorateInstruction(Load, LVal.getTBAAInfo());
  return Load;
}

/// An LValue is a candidate for having its loads and stores be made atomic if
/// we are operating under /volatile:ms *and* the LValue itself is volatile and
/// such an operation can be performed without a libcall.
bool CodeGenFunction::LValueIsSuitableForInlineAtomic(LValue LV) {
  AtomicInfo AI(*this, LV);
  bool IsVolatile = LV.isVolatile() || hasVolatileMember(LV.getType());
  // An atomic is inline if we don't need to use a libcall.
  bool AtomicIsInline = !AI.shouldUseLibcall();
  return CGM.getCodeGenOpts().MSVolatile && IsVolatile && AtomicIsInline;
}

/// A type is a candidate for having its loads and stores be made atomic if
/// we are operating under /volatile:ms *and* we know the access is volatile
/// and such an operation can be performed without a libcall.
bool CodeGenFunction::typeIsSuitableForInlineAtomic(QualType Ty,
                                                    bool IsVolatile) const {
  // An atomic is inline if we don't need to use a libcall (e.g. it is builtin).
  bool AtomicIsInline = getContext().getTargetInfo().hasBuiltinAtomic(
      getContext().getTypeSize(Ty), getContext().getTypeAlign(Ty));
  return CGM.getCodeGenOpts().MSVolatile && IsVolatile && AtomicIsInline;
}

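// Under MSVC's /volatile:ms model (CodeGenOpts.MSVolatile), plain volatile
// accesses gain ordering semantics whenever an inline atomic is available.
// A hypothetical example, not from this file:
//   volatile int guard;
//   guard = 1;          // lowered as an atomic release store
//   int seen = guard;   // lowered as an atomic acquire load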
RValue CodeGenFunction::EmitAtomicLoad(LValue LV, SourceLocation SL,
                                       AggValueSlot Slot) {
  llvm::AtomicOrdering AO;
  bool IsVolatile = LV.isVolatileQualified();
  if (LV.getType()->isAtomicType()) {
    AO = llvm::SequentiallyConsistent;
  } else {
    AO = llvm::Acquire;
    IsVolatile = true;
  }
  return EmitAtomicLoad(LV, SL, AO, IsVolatile, Slot);
}

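// Load the atomic object as an r-value, via either the __atomic_load
// libcall or a native instruction; AsValue selects whether the result is
// converted to the value type or kept as the whole atomic type.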
RValue AtomicInfo::EmitAtomicLoad(AggValueSlot ResultSlot, SourceLocation Loc,
                                  bool AsValue, llvm::AtomicOrdering AO,
                                  bool IsVolatile) {
  // Check whether we should use a library call.
  if (shouldUseLibcall()) {
    llvm::Value *TempAddr;
    if (LVal.isSimple() && !ResultSlot.isIgnored()) {
      assert(getEvaluationKind() == TEK_Aggregate);
      TempAddr = ResultSlot.getAddr();
    } else
      TempAddr = CreateTempAlloca();

    EmitAtomicLoadLibcall(TempAddr, AO, IsVolatile);

    // Okay, turn that back into the original value or whole atomic (for
    // non-simple lvalues) type.
    return convertTempToRValue(TempAddr, ResultSlot, Loc, AsValue);
  }

  // Okay, we're doing this natively.
  auto *Load = EmitAtomicLoadOp(AO, IsVolatile);

  // If we're ignoring an aggregate return, don't do anything.
  if (getEvaluationKind() == TEK_Aggregate && ResultSlot.isIgnored())
    return RValue::getAggregate(nullptr, false);

  // Okay, turn that back into the original value or atomic (for non-simple
  // lvalues) type.
  return ConvertIntToValueOrAtomic(Load, ResultSlot, Loc, AsValue);
}

/// Emit a load from an l-value of atomic type. Note that the r-value
/// we produce is an r-value of the atomic *value* type.
RValue CodeGenFunction::EmitAtomicLoad(LValue src, SourceLocation loc,
                                       llvm::AtomicOrdering AO, bool IsVolatile,
                                       AggValueSlot resultSlot) {
  AtomicInfo Atomics(*this, src);
  return Atomics.EmitAtomicLoad(resultSlot, loc, /*AsValue=*/true, AO,
                                IsVolatile);
}

/// Copy an r-value into memory as part of storing to an atomic type.
/// This needs to create a bit-pattern suitable for atomic operations.
void AtomicInfo::emitCopyIntoMemory(RValue rvalue) const {
  assert(LVal.isSimple());
  // If we have an r-value, the rvalue should be of the atomic type,
  // which means that the caller is responsible for having zeroed
  // any padding. Just do an aggregate copy of that type.
  if (rvalue.isAggregate()) {
    CGF.EmitAggregateCopy(getAtomicAddress(),
                          rvalue.getAggregateAddr(),
                          getAtomicType(),
                          (rvalue.isVolatileQualified()
                           || LVal.isVolatileQualified()),
                          LVal.getAlignment());
    return;
  }

  // Okay, otherwise we're copying stuff.

  // Zero out the buffer if necessary.
  emitMemSetZeroIfNecessary();

  // Drill past the padding if present.
  LValue TempLVal = projectValue();

  // Okay, store the rvalue in.
  if (rvalue.isScalar()) {
    CGF.EmitStoreOfScalar(rvalue.getScalarVal(), TempLVal, /*init*/ true);
  } else {
    CGF.EmitStoreOfComplex(rvalue.getComplexVal(), TempLVal, /*init*/ true);
  }
}

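// Note on emitCopyIntoMemory: the buffer is zero-filled before a scalar or
// complex store because compare-and-exchange operates on the full atomic
// width, padding bytes included; undefined padding could make two logically
// equal values compare unequal.
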
/// Materialize an r-value into memory for the purposes of storing it
/// to an atomic type.
llvm::Value *AtomicInfo::materializeRValue(RValue rvalue) const {
  // Aggregate r-values are already in memory, and EmitAtomicStore
  // requires them to be values of the atomic type.
  if (rvalue.isAggregate())
    return rvalue.getAggregateAddr();

  // Otherwise, make a temporary and materialize into it.
  LValue TempLV = CGF.MakeAddrLValue(CreateTempAlloca(), getAtomicType(),
                                     getAtomicAlignment());
  AtomicInfo Atomics(CGF, TempLV);
  Atomics.emitCopyIntoMemory(rvalue);
  return TempLV.getAddress();
}

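// Collapse an r-value into a single integer suitable for the native atomic
// instructions, preferring a direct ptrtoint/bitcast over a round-trip
// through memory.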
llvm::Value *AtomicInfo::convertRValueToInt(RValue RVal) const {
  // If we've got a scalar value of the right size, try to avoid going
  // through memory.
  if (RVal.isScalar() && (!hasPadding() || !LVal.isSimple())) {
    llvm::Value *Value = RVal.getScalarVal();
    if (isa<llvm::IntegerType>(Value->getType()))
      return Value;
    else {
      llvm::IntegerType *InputIntTy = llvm::IntegerType::get(
          CGF.getLLVMContext(),
          LVal.isSimple() ? getValueSizeInBits() : getAtomicSizeInBits());
      if (isa<llvm::PointerType>(Value->getType()))
        return CGF.Builder.CreatePtrToInt(Value, InputIntTy);
      else if (llvm::BitCastInst::isBitCastable(Value->getType(), InputIntTy))
        return CGF.Builder.CreateBitCast(Value, InputIntTy);
    }
  }
  // Otherwise, we need to go through memory.
  // Put the r-value in memory.
  llvm::Value *Addr = materializeRValue(RVal);

  // Cast the temporary to the atomic int type and pull a value out.
  Addr = emitCastToAtomicIntPointer(Addr);
  return CGF.Builder.CreateAlignedLoad(Addr,
                                       getAtomicAlignment().getQuantity());
}

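// Native compare-and-exchange. The instruction yields a {value, success}
// pair; for a 4-byte object it looks roughly like (illustrative IR only):
//   %res = cmpxchg i32* %obj, i32 %expected, i32 %desired seq_cst seq_cst
//   %old = extractvalue { i32, i1 } %res, 0
//   %ok  = extractvalue { i32, i1 } %res, 1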
std::pair<llvm::Value *, llvm::Value *> AtomicInfo::EmitAtomicCompareExchangeOp(
    llvm::Value *Expected, llvm::Value *Desired, llvm::AtomicOrdering Success,
    llvm::AtomicOrdering Failure, bool IsWeak) {
  // Do the atomic compare-and-exchange.
  auto *Addr = emitCastToAtomicIntPointer(getAtomicAddress());
  auto *Inst = CGF.Builder.CreateAtomicCmpXchg(Addr, Expected, Desired, Success,
                                               Failure);
  // Other decoration.
  Inst->setVolatile(LVal.isVolatileQualified());
  Inst->setWeak(IsWeak);

  // Okay, turn that back into the original value type.
  auto *PreviousVal = CGF.Builder.CreateExtractValue(Inst, /*Idxs=*/0);
  auto *SuccessFailureVal = CGF.Builder.CreateExtractValue(Inst, /*Idxs=*/1);
  return std::make_pair(PreviousVal, SuccessFailureVal);
}

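// Libcall form of compare-and-exchange. Per the contract documented below,
// the runtime writes the old value back through the 'expected' pointer on
// failure, which is why PreviousVal is reloaded from ExpectedAddr after the
// call.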
std::pair<llvm::Value *, llvm::Value *>
AtomicInfo::EmitAtomicCompareExchangeLibcall(llvm::Value *ExpectedAddr,
                                             llvm::Value *DesiredAddr,
                                             llvm::AtomicOrdering Success,
                                             llvm::AtomicOrdering Failure) {
  // bool __atomic_compare_exchange(size_t size, void *obj, void *expected,
  // void *desired, int success, int failure);
  CallArgList Args;
  Args.add(RValue::get(getAtomicSizeValue()), CGF.getContext().getSizeType());
  Args.add(RValue::get(CGF.EmitCastToVoidPtr(getAtomicAddress())),
           CGF.getContext().VoidPtrTy);
  Args.add(RValue::get(CGF.EmitCastToVoidPtr(ExpectedAddr)),
           CGF.getContext().VoidPtrTy);
  Args.add(RValue::get(CGF.EmitCastToVoidPtr(DesiredAddr)),
           CGF.getContext().VoidPtrTy);
  Args.add(RValue::get(llvm::ConstantInt::get(
               CGF.IntTy, translateAtomicOrdering(Success))),
           CGF.getContext().IntTy);
  Args.add(RValue::get(llvm::ConstantInt::get(
               CGF.IntTy, translateAtomicOrdering(Failure))),
           CGF.getContext().IntTy);
  auto SuccessFailureRVal = emitAtomicLibcall(CGF, "__atomic_compare_exchange",
                                              CGF.getContext().BoolTy, Args);
  auto *PreviousVal = CGF.Builder.CreateAlignedLoad(
      ExpectedAddr, getValueAlignment().getQuantity());
  return std::make_pair(PreviousVal, SuccessFailureRVal.getScalarVal());
}

std::pair<llvm::Value *, llvm::Value *> AtomicInfo::EmitAtomicCompareExchange(
    RValue Expected, RValue Desired, llvm::AtomicOrdering Success,
    llvm::AtomicOrdering Failure, bool IsWeak) {
  if (Failure >= Success)
    // Don't assert on undefined behavior.
    Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(Success);

  // Check whether we should use a library call.
  if (shouldUseLibcall()) {
    // Produce the source addresses.
    auto *ExpectedAddr = materializeRValue(Expected);
    auto *DesiredAddr = materializeRValue(Desired);
    return EmitAtomicCompareExchangeLibcall(ExpectedAddr, DesiredAddr, Success,
                                            Failure);
  }

  // If we've got a scalar value of the right size, try to avoid going
  // through memory.
  auto *ExpectedIntVal = convertRValueToInt(Expected);
  auto *DesiredIntVal = convertRValueToInt(Desired);

  return EmitAtomicCompareExchangeOp(ExpectedIntVal, DesiredIntVal, Success,
                                     Failure, IsWeak);
}

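// Store dispatch: _Atomic stores are sequentially consistent by default;
// the only other way to get here is an MS-volatile access, which receives
// release semantics (mirroring the acquire loads above).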
void CodeGenFunction::EmitAtomicStore(RValue rvalue, LValue lvalue,
                                      bool isInit) {
  bool IsVolatile = lvalue.isVolatileQualified();
  llvm::AtomicOrdering AO;
  if (lvalue.getType()->isAtomicType()) {
    AO = llvm::SequentiallyConsistent;
  } else {
    AO = llvm::Release;
    IsVolatile = true;
  }
  return EmitAtomicStore(rvalue, lvalue, AO, IsVolatile, isInit);
}

/// Emit a store to an l-value of atomic type.
///
/// Note that the r-value is expected to be an r-value *of the atomic
/// type*; this means that for aggregate r-values, it should include
/// storage for any padding that was necessary.
void CodeGenFunction::EmitAtomicStore(RValue rvalue, LValue dest,
                                      llvm::AtomicOrdering AO, bool IsVolatile,
                                      bool isInit) {
  // If this is an aggregate r-value, it should agree in type except
  // maybe for address-space qualification.
  assert(!rvalue.isAggregate() ||
         rvalue.getAggregateAddr()->getType()->getPointerElementType()
           == dest.getAddress()->getType()->getPointerElementType());

  AtomicInfo atomics(*this, dest);
  LValue LVal = atomics.getAtomicLValue();

  // If this is an initialization, just put the value there normally.
  if (LVal.isSimple()) {
    if (isInit) {
      atomics.emitCopyIntoMemory(rvalue);
      return;
    }

    // Check whether we should use a library call.
    if (atomics.shouldUseLibcall()) {
      // Produce a source address.
      llvm::Value *srcAddr = atomics.materializeRValue(rvalue);

      // void __atomic_store(size_t size, void *mem, void *val, int order)
      CallArgList args;
      args.add(RValue::get(atomics.getAtomicSizeValue()),
               getContext().getSizeType());
      args.add(RValue::get(EmitCastToVoidPtr(atomics.getAtomicAddress())),
               getContext().VoidPtrTy);
      args.add(RValue::get(EmitCastToVoidPtr(srcAddr)), getContext().VoidPtrTy);
      args.add(RValue::get(llvm::ConstantInt::get(
                   IntTy, AtomicInfo::translateAtomicOrdering(AO))),
               getContext().IntTy);
      emitAtomicLibcall(*this, "__atomic_store", getContext().VoidTy, args);
      return;
    }

    // Okay, we're doing this natively.
    llvm::Value *intValue = atomics.convertRValueToInt(rvalue);

    // Do the atomic store.
    llvm::Value *addr =
        atomics.emitCastToAtomicIntPointer(atomics.getAtomicAddress());
    intValue = Builder.CreateIntCast(
        intValue, addr->getType()->getPointerElementType(), /*isSigned=*/false);
    llvm::StoreInst *store = Builder.CreateStore(intValue, addr);

    // Initializations don't need to be atomic.
    if (!isInit)
      store->setAtomic(AO);

    // Other decoration.
    store->setAlignment(dest.getAlignment().getQuantity());
    if (IsVolatile)
      store->setVolatile(true);
    if (dest.getTBAAInfo())
      CGM.DecorateInstruction(store, dest.getTBAAInfo());
    return;
  }

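  // For bitfield, vector-element, and ext-vector-element atomics the store
  // is a read-modify-write: load the whole atomic cell, splice the new
  // value into a copy, and retry with compare-and-exchange until no other
  // thread has intervened. Roughly (pseudocode sketch):
  //   old = atomic_load(obj);
  //   do {
  //     tmp = old;
  //     store rvalue through the non-simple lvalue into tmp;
  //   } while (!cmpxchg(obj, &old, tmp));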
  // Atomically load the previous value.
  RValue OldRVal =
      atomics.EmitAtomicLoad(AggValueSlot::ignored(), SourceLocation(),
                             /*AsValue=*/false, AO, IsVolatile);
  // For non-simple lvalues, perform the compare-and-swap loop.
  auto *ContBB = createBasicBlock("atomic_cont");
  auto *ExitBB = createBasicBlock("atomic_exit");
  auto *CurBB = Builder.GetInsertBlock();
  EmitBlock(ContBB);
  llvm::PHINode *PHI = Builder.CreatePHI(OldRVal.getScalarVal()->getType(),
                                         /*NumReservedValues=*/2);
  PHI->addIncoming(OldRVal.getScalarVal(), CurBB);
  RValue OriginalRValue = RValue::get(PHI);
  // Materialize the current value into a temporary.
  auto *Ptr = atomics.materializeRValue(OriginalRValue);
  // Build a new lvalue for the temp address.
  LValue UpdateLVal;
  if (LVal.isBitField())
    UpdateLVal = LValue::MakeBitfield(Ptr, LVal.getBitFieldInfo(),
                                      LVal.getType(), LVal.getAlignment());
  else if (LVal.isVectorElt())
    UpdateLVal = LValue::MakeVectorElt(Ptr, LVal.getVectorIdx(), LVal.getType(),
                                       LVal.getAlignment());
  else {
    assert(LVal.isExtVectorElt());
    UpdateLVal = LValue::MakeExtVectorElt(Ptr, LVal.getExtVectorElts(),
                                          LVal.getType(), LVal.getAlignment());
  }
  UpdateLVal.setTBAAInfo(LVal.getTBAAInfo());
  // Store the new value into the corresponding memory area.
  EmitStoreThroughLValue(rvalue, UpdateLVal);
  // Load the new value.
  RValue NewRValue = RValue::get(EmitLoadOfScalar(
      Ptr, LVal.isVolatile(), atomics.getAtomicAlignment().getQuantity(),
      atomics.getAtomicType(), SourceLocation()));
  // Try to write the new value using a cmpxchg operation.
  auto Pair = atomics.EmitAtomicCompareExchange(OriginalRValue, NewRValue, AO);
  llvm::Value *OldValue = Pair.first;
  if (!atomics.shouldUseLibcall())
    // Convert the integer value to the original atomic type.
    OldValue = atomics.ConvertIntToValueOrAtomic(
        OldValue, AggValueSlot::ignored(), SourceLocation(),
        /*AsValue=*/false).getScalarVal();
  PHI->addIncoming(OldValue, ContBB);
  // Exit on success; otherwise loop with the freshly observed old value.
  Builder.CreateCondBr(Pair.second, ExitBB, ContBB);
  EmitBlock(ExitBB);
}

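// The returned pair is (previous value, success flag). On the libcall path
// the previous value is already in value form; on the native path it is
// still an integer and needs ConvertIntToValueOrAtomic, as below.
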
/// Emit a compare-and-exchange op for an atomic type.
std::pair<RValue, RValue> CodeGenFunction::EmitAtomicCompareExchange(
    LValue Obj, RValue Expected, RValue Desired, SourceLocation Loc,
    llvm::AtomicOrdering Success, llvm::AtomicOrdering Failure, bool IsWeak,
    AggValueSlot Slot) {
  // If this is an aggregate r-value, it should agree in type except
  // maybe for address-space qualification.
  assert(!Expected.isAggregate() ||
         Expected.getAggregateAddr()->getType()->getPointerElementType() ==
             Obj.getAddress()->getType()->getPointerElementType());
  assert(!Desired.isAggregate() ||
         Desired.getAggregateAddr()->getType()->getPointerElementType() ==
             Obj.getAddress()->getType()->getPointerElementType());
  AtomicInfo Atomics(*this, Obj);

  auto Pair = Atomics.EmitAtomicCompareExchange(Expected, Desired, Success,
                                                Failure, IsWeak);
  return std::make_pair(Atomics.shouldUseLibcall()
                            ? RValue::get(Pair.first)
                            : Atomics.ConvertIntToValueOrAtomic(
                                  Pair.first, Slot, Loc, /*AsValue=*/true),
                        RValue::get(Pair.second));
}

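// Initialization of an atomic object needs no ordering: no other thread can
// observe the object yet, so each evaluation kind simply stores through the
// (possibly padding-projected) value l-value.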
void CodeGenFunction::EmitAtomicInit(Expr *init, LValue dest) {
  AtomicInfo atomics(*this, dest);

  switch (atomics.getEvaluationKind()) {
  case TEK_Scalar: {
    llvm::Value *value = EmitScalarExpr(init);
    atomics.emitCopyIntoMemory(RValue::get(value));
    return;
  }

  case TEK_Complex: {
    ComplexPairTy value = EmitComplexExpr(init);
    atomics.emitCopyIntoMemory(RValue::getComplex(value));
    return;
  }

  case TEK_Aggregate: {
    // Fix up the destination if the initializer isn't an expression
    // of atomic type.
    bool Zeroed = false;
    if (!init->getType()->isAtomicType()) {
      Zeroed = atomics.emitMemSetZeroIfNecessary();
      dest = atomics.projectValue();
    }

    // Evaluate the expression directly into the destination.
    AggValueSlot slot = AggValueSlot::forLValue(dest,
                                                AggValueSlot::IsNotDestructed,
                                                AggValueSlot::DoesNotNeedGCBarriers,
                                                AggValueSlot::IsNotAliased,
                                                Zeroed ? AggValueSlot::IsZeroed :
                                                         AggValueSlot::IsNotZeroed);

    EmitAggExpr(init, slot);
    return;
  }
  }
  llvm_unreachable("bad evaluation kind");
}