//===--- CGAtomic.cpp - Emit LLVM IR for atomic operations ----------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the code for emitting atomic operations.
//
//===----------------------------------------------------------------------===//

#include "CodeGenFunction.h"
#include "CGCall.h"
#include "CodeGenModule.h"
#include "clang/AST/ASTContext.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Operator.h"

using namespace clang;
using namespace CodeGen;

namespace {
  class AtomicInfo {
    CodeGenFunction &CGF;
    QualType AtomicTy;
    QualType ValueTy;
    uint64_t AtomicSizeInBits;
    uint64_t ValueSizeInBits;
    CharUnits AtomicAlign;
    CharUnits ValueAlign;
    CharUnits LValueAlign;
    TypeEvaluationKind EvaluationKind;
    bool UseLibcall;
  public:
    AtomicInfo(CodeGenFunction &CGF, LValue &lvalue) : CGF(CGF) {
      assert(lvalue.isSimple());

      AtomicTy = lvalue.getType();
      ValueTy = AtomicTy->castAs<AtomicType>()->getValueType();
      EvaluationKind = CGF.getEvaluationKind(ValueTy);

      ASTContext &C = CGF.getContext();

      uint64_t ValueAlignInBits;
      uint64_t AtomicAlignInBits;
      TypeInfo ValueTI = C.getTypeInfo(ValueTy);
      ValueSizeInBits = ValueTI.Width;
      ValueAlignInBits = ValueTI.Align;

      TypeInfo AtomicTI = C.getTypeInfo(AtomicTy);
      AtomicSizeInBits = AtomicTI.Width;
      AtomicAlignInBits = AtomicTI.Align;

      assert(ValueSizeInBits <= AtomicSizeInBits);
      assert(ValueAlignInBits <= AtomicAlignInBits);

      AtomicAlign = C.toCharUnitsFromBits(AtomicAlignInBits);
      ValueAlign = C.toCharUnitsFromBits(ValueAlignInBits);
      if (lvalue.getAlignment().isZero())
        lvalue.setAlignment(AtomicAlign);

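      // Prefer inline (lock-free) atomics when the target reports builtin
      // support for this size and alignment; otherwise fall back to the
      // __atomic_* runtime library.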
      UseLibcall = !C.getTargetInfo().hasBuiltinAtomic(
          AtomicSizeInBits, C.toBits(lvalue.getAlignment()));
    }

    QualType getAtomicType() const { return AtomicTy; }
    QualType getValueType() const { return ValueTy; }
    CharUnits getAtomicAlignment() const { return AtomicAlign; }
    CharUnits getValueAlignment() const { return ValueAlign; }
    uint64_t getAtomicSizeInBits() const { return AtomicSizeInBits; }
    uint64_t getValueSizeInBits() const { return ValueSizeInBits; }
    TypeEvaluationKind getEvaluationKind() const { return EvaluationKind; }
    bool shouldUseLibcall() const { return UseLibcall; }

    /// Is the atomic size larger than the underlying value type?
    ///
    /// Note that the absence of padding does not mean that atomic
    /// objects are completely interchangeable with non-atomic
    /// objects: we might have promoted the alignment of a type
    /// without making it bigger.
    bool hasPadding() const {
      return (ValueSizeInBits != AtomicSizeInBits);
    }

    bool emitMemSetZeroIfNecessary(LValue dest) const;

    llvm::Value *getAtomicSizeValue() const {
      CharUnits size = CGF.getContext().toCharUnitsFromBits(AtomicSizeInBits);
      return CGF.CGM.getSize(size);
    }

    /// Cast the given pointer to an integer pointer suitable for
    /// atomic operations.
    llvm::Value *emitCastToAtomicIntPointer(llvm::Value *addr) const;

    /// Turn an atomic-layout object into an r-value.
    RValue convertTempToRValue(llvm::Value *addr,
                               AggValueSlot resultSlot,
                               SourceLocation loc) const;

    /// \brief Converts an r-value to an integer value.
    llvm::Value *convertRValueToInt(RValue RVal) const;

    RValue convertIntToValue(llvm::Value *IntVal, AggValueSlot ResultSlot,
                             SourceLocation Loc) const;

    /// Copy an atomic r-value into atomic-layout memory.
    void emitCopyIntoMemory(RValue rvalue, LValue lvalue) const;

    /// Project an l-value down to the value field.
    LValue projectValue(LValue lvalue) const {
      llvm::Value *addr = lvalue.getAddress();
      if (hasPadding())
        addr = CGF.Builder.CreateStructGEP(addr, 0);

      return LValue::MakeAddr(addr, getValueType(), lvalue.getAlignment(),
                              CGF.getContext(), lvalue.getTBAAInfo());
    }

    /// Materialize an atomic r-value in atomic-layout memory.
    llvm::Value *materializeRValue(RValue rvalue) const;

  private:
    bool requiresMemSetZero(llvm::Type *type) const;
  };
}

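/// Emit a call to one of the __atomic_* runtime library functions, with the
/// given function name, result type, and argument list.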
static RValue emitAtomicLibcall(CodeGenFunction &CGF,
                                StringRef fnName,
                                QualType resultType,
                                CallArgList &args) {
  const CGFunctionInfo &fnInfo =
      CGF.CGM.getTypes().arrangeFreeFunctionCall(resultType, args,
          FunctionType::ExtInfo(), RequiredArgs::All);
  llvm::FunctionType *fnTy = CGF.CGM.getTypes().GetFunctionType(fnInfo);
  llvm::Constant *fn = CGF.CGM.CreateRuntimeFunction(fnTy, fnName);
  return CGF.EmitCall(fnInfo, fn, ReturnValueSlot(), args);
}

/// Does a store of the given IR type modify the full expected width?
static bool isFullSizeType(CodeGenModule &CGM, llvm::Type *type,
                           uint64_t expectedSize) {
  return (CGM.getDataLayout().getTypeStoreSize(type) * 8 == expectedSize);
}

/// Does the atomic type require memsetting to zero before initialization?
///
/// The IR type is provided as a way of making certain queries faster.
bool AtomicInfo::requiresMemSetZero(llvm::Type *type) const {
  // If the atomic type has size padding, we definitely need a memset.
  if (hasPadding()) return true;

  // Otherwise, do some simple heuristics to try to avoid it:
  switch (getEvaluationKind()) {
  // For scalars and complexes, check whether the store size of the
  // type uses the full size.
  case TEK_Scalar:
    return !isFullSizeType(CGF.CGM, type, AtomicSizeInBits);
  case TEK_Complex:
    return !isFullSizeType(CGF.CGM, type->getStructElementType(0),
                           AtomicSizeInBits / 2);

  // Padding in structs has an undefined bit pattern. User beware.
  case TEK_Aggregate:
    return false;
  }
  llvm_unreachable("bad evaluation kind");
}

bool AtomicInfo::emitMemSetZeroIfNecessary(LValue dest) const {
  llvm::Value *addr = dest.getAddress();
  if (!requiresMemSetZero(addr->getType()->getPointerElementType()))
    return false;

  CGF.Builder.CreateMemSet(addr, llvm::ConstantInt::get(CGF.Int8Ty, 0),
                           AtomicSizeInBits / 8,
                           dest.getAlignment().getQuantity());
  return true;
}

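/// Emit a single cmpxchg for the given success and failure orderings. On
/// failure the old value is stored back into the 'expected' temporary (Val1);
/// the boolean success flag is stored into Dest.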
static void emitAtomicCmpXchg(CodeGenFunction &CGF, AtomicExpr *E, bool IsWeak,
                              llvm::Value *Dest, llvm::Value *Ptr,
                              llvm::Value *Val1, llvm::Value *Val2,
                              uint64_t Size, unsigned Align,
                              llvm::AtomicOrdering SuccessOrder,
                              llvm::AtomicOrdering FailureOrder) {
  // Load the expected and desired values from their temporaries.
  llvm::LoadInst *Expected = CGF.Builder.CreateLoad(Val1);
  Expected->setAlignment(Align);
  llvm::LoadInst *Desired = CGF.Builder.CreateLoad(Val2);
  Desired->setAlignment(Align);

  llvm::AtomicCmpXchgInst *Pair = CGF.Builder.CreateAtomicCmpXchg(
      Ptr, Expected, Desired, SuccessOrder, FailureOrder);
  Pair->setVolatile(E->isVolatile());
  Pair->setWeak(IsWeak);

  // Cmp holds the result of the compare-exchange operation: true on success,
  // false on failure.
  llvm::Value *Old = CGF.Builder.CreateExtractValue(Pair, 0);
  llvm::Value *Cmp = CGF.Builder.CreateExtractValue(Pair, 1);

  // This basic block is used to hold the store instruction if the operation
  // failed.
  llvm::BasicBlock *StoreExpectedBB =
      CGF.createBasicBlock("cmpxchg.store_expected", CGF.CurFn);

  // This basic block is the exit point of the operation; we should end up
  // here regardless of whether or not the operation succeeded.
  llvm::BasicBlock *ContinueBB =
      CGF.createBasicBlock("cmpxchg.continue", CGF.CurFn);

  // Update Expected if Expected isn't equal to Old, otherwise branch to the
  // exit point.
  CGF.Builder.CreateCondBr(Cmp, ContinueBB, StoreExpectedBB);

  CGF.Builder.SetInsertPoint(StoreExpectedBB);
  // Update the memory at Expected with Old's value.
  llvm::StoreInst *StoreExpected = CGF.Builder.CreateStore(Old, Val1);
  StoreExpected->setAlignment(Align);
  // Finally, branch to the exit point.
  CGF.Builder.CreateBr(ContinueBB);

  CGF.Builder.SetInsertPoint(ContinueBB);
  // Update the memory at Dest with Cmp's value.
  CGF.EmitStoreOfScalar(Cmp, CGF.MakeAddrLValue(Dest, E->getType()));
  return;
}

/// Given an ordering required on success, emit all possible cmpxchg
/// instructions to cope with the provided (but possibly only dynamically known)
/// FailureOrder.
static void emitAtomicCmpXchgFailureSet(CodeGenFunction &CGF, AtomicExpr *E,
                                        bool IsWeak, llvm::Value *Dest,
                                        llvm::Value *Ptr, llvm::Value *Val1,
                                        llvm::Value *Val2,
                                        llvm::Value *FailureOrderVal,
                                        uint64_t Size, unsigned Align,
                                        llvm::AtomicOrdering SuccessOrder) {
  llvm::AtomicOrdering FailureOrder;
  if (llvm::ConstantInt *FO = dyn_cast<llvm::ConstantInt>(FailureOrderVal)) {
    switch (FO->getSExtValue()) {
    default:
      FailureOrder = llvm::Monotonic;
      break;
    case AtomicExpr::AO_ABI_memory_order_consume:
    case AtomicExpr::AO_ABI_memory_order_acquire:
      FailureOrder = llvm::Acquire;
      break;
    case AtomicExpr::AO_ABI_memory_order_seq_cst:
      FailureOrder = llvm::SequentiallyConsistent;
      break;
    }
    if (FailureOrder >= SuccessOrder) {
      // Don't assert on undefined behaviour.
      FailureOrder =
          llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(SuccessOrder);
    }
    emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, Size, Align,
                      SuccessOrder, FailureOrder);
    return;
  }

  // Create all the relevant BB's
  llvm::BasicBlock *MonotonicBB = nullptr, *AcquireBB = nullptr,
                   *SeqCstBB = nullptr;
  MonotonicBB = CGF.createBasicBlock("monotonic_fail", CGF.CurFn);
  if (SuccessOrder != llvm::Monotonic && SuccessOrder != llvm::Release)
    AcquireBB = CGF.createBasicBlock("acquire_fail", CGF.CurFn);
  if (SuccessOrder == llvm::SequentiallyConsistent)
    SeqCstBB = CGF.createBasicBlock("seqcst_fail", CGF.CurFn);

  llvm::BasicBlock *ContBB = CGF.createBasicBlock("atomic.continue", CGF.CurFn);

  llvm::SwitchInst *SI = CGF.Builder.CreateSwitch(FailureOrderVal, MonotonicBB);

  // Emit all the different atomics

  // MonotonicBB is arbitrarily chosen as the default case; in practice, this
  // doesn't matter unless someone is crazy enough to use something that
  // doesn't fold to a constant for the ordering.
  CGF.Builder.SetInsertPoint(MonotonicBB);
  emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2,
                    Size, Align, SuccessOrder, llvm::Monotonic);
  CGF.Builder.CreateBr(ContBB);

  if (AcquireBB) {
    CGF.Builder.SetInsertPoint(AcquireBB);
    emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2,
                      Size, Align, SuccessOrder, llvm::Acquire);
    CGF.Builder.CreateBr(ContBB);
    SI->addCase(CGF.Builder.getInt32(AtomicExpr::AO_ABI_memory_order_consume),
                AcquireBB);
    SI->addCase(CGF.Builder.getInt32(AtomicExpr::AO_ABI_memory_order_acquire),
                AcquireBB);
  }
  if (SeqCstBB) {
    CGF.Builder.SetInsertPoint(SeqCstBB);
    emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2,
                      Size, Align, SuccessOrder, llvm::SequentiallyConsistent);
    CGF.Builder.CreateBr(ContBB);
    SI->addCase(CGF.Builder.getInt32(AtomicExpr::AO_ABI_memory_order_seq_cst),
                SeqCstBB);
  }

  CGF.Builder.SetInsertPoint(ContBB);
}

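/// Emit the IR for one atomic operation once the memory ordering is known as
/// a constant. Compare-exchange forms are dispatched to
/// emitAtomicCmpXchgFailureSet; loads and stores become atomic load/store
/// instructions; everything else becomes an atomicrmw.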
static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, llvm::Value *Dest,
                         llvm::Value *Ptr, llvm::Value *Val1, llvm::Value *Val2,
                         llvm::Value *IsWeak, llvm::Value *FailureOrder,
                         uint64_t Size, unsigned Align,
                         llvm::AtomicOrdering Order) {
  llvm::AtomicRMWInst::BinOp Op = llvm::AtomicRMWInst::Add;
  llvm::Instruction::BinaryOps PostOp = (llvm::Instruction::BinaryOps)0;

  switch (E->getOp()) {
  case AtomicExpr::AO__c11_atomic_init:
    llvm_unreachable("Already handled!");

  case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
    emitAtomicCmpXchgFailureSet(CGF, E, false, Dest, Ptr, Val1, Val2,
                                FailureOrder, Size, Align, Order);
    return;
  case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
    emitAtomicCmpXchgFailureSet(CGF, E, true, Dest, Ptr, Val1, Val2,
                                FailureOrder, Size, Align, Order);
    return;
  case AtomicExpr::AO__atomic_compare_exchange:
  case AtomicExpr::AO__atomic_compare_exchange_n: {
    if (llvm::ConstantInt *IsWeakC = dyn_cast<llvm::ConstantInt>(IsWeak)) {
      emitAtomicCmpXchgFailureSet(CGF, E, IsWeakC->getZExtValue(), Dest, Ptr,
                                  Val1, Val2, FailureOrder, Size, Align, Order);
    } else {
      // Create all the relevant BB's
      llvm::BasicBlock *StrongBB =
          CGF.createBasicBlock("cmpxchg.strong", CGF.CurFn);
      llvm::BasicBlock *WeakBB = CGF.createBasicBlock("cmpxchg.weak", CGF.CurFn);
      llvm::BasicBlock *ContBB =
          CGF.createBasicBlock("cmpxchg.continue", CGF.CurFn);

      llvm::SwitchInst *SI = CGF.Builder.CreateSwitch(IsWeak, WeakBB);
      SI->addCase(CGF.Builder.getInt1(false), StrongBB);

      CGF.Builder.SetInsertPoint(StrongBB);
      emitAtomicCmpXchgFailureSet(CGF, E, false, Dest, Ptr, Val1, Val2,
                                  FailureOrder, Size, Align, Order);
      CGF.Builder.CreateBr(ContBB);

      CGF.Builder.SetInsertPoint(WeakBB);
      emitAtomicCmpXchgFailureSet(CGF, E, true, Dest, Ptr, Val1, Val2,
                                  FailureOrder, Size, Align, Order);
      CGF.Builder.CreateBr(ContBB);

      CGF.Builder.SetInsertPoint(ContBB);
    }
    return;
  }
  case AtomicExpr::AO__c11_atomic_load:
  case AtomicExpr::AO__atomic_load_n:
  case AtomicExpr::AO__atomic_load: {
    llvm::LoadInst *Load = CGF.Builder.CreateLoad(Ptr);
    Load->setAtomic(Order);
    Load->setAlignment(Size);
    Load->setVolatile(E->isVolatile());
    llvm::StoreInst *StoreDest = CGF.Builder.CreateStore(Load, Dest);
    StoreDest->setAlignment(Align);
    return;
  }

  case AtomicExpr::AO__c11_atomic_store:
  case AtomicExpr::AO__atomic_store:
  case AtomicExpr::AO__atomic_store_n: {
    assert(!Dest && "Store does not return a value");
    llvm::LoadInst *LoadVal1 = CGF.Builder.CreateLoad(Val1);
    LoadVal1->setAlignment(Align);
    llvm::StoreInst *Store = CGF.Builder.CreateStore(LoadVal1, Ptr);
    Store->setAtomic(Order);
    Store->setAlignment(Size);
    Store->setVolatile(E->isVolatile());
    return;
  }

  case AtomicExpr::AO__c11_atomic_exchange:
  case AtomicExpr::AO__atomic_exchange_n:
  case AtomicExpr::AO__atomic_exchange:
    Op = llvm::AtomicRMWInst::Xchg;
    break;

  case AtomicExpr::AO__atomic_add_fetch:
    PostOp = llvm::Instruction::Add;
    // Fall through.
  case AtomicExpr::AO__c11_atomic_fetch_add:
  case AtomicExpr::AO__atomic_fetch_add:
    Op = llvm::AtomicRMWInst::Add;
    break;

  case AtomicExpr::AO__atomic_sub_fetch:
    PostOp = llvm::Instruction::Sub;
    // Fall through.
  case AtomicExpr::AO__c11_atomic_fetch_sub:
  case AtomicExpr::AO__atomic_fetch_sub:
    Op = llvm::AtomicRMWInst::Sub;
    break;

  case AtomicExpr::AO__atomic_and_fetch:
    PostOp = llvm::Instruction::And;
    // Fall through.
  case AtomicExpr::AO__c11_atomic_fetch_and:
  case AtomicExpr::AO__atomic_fetch_and:
    Op = llvm::AtomicRMWInst::And;
    break;

  case AtomicExpr::AO__atomic_or_fetch:
    PostOp = llvm::Instruction::Or;
    // Fall through.
  case AtomicExpr::AO__c11_atomic_fetch_or:
  case AtomicExpr::AO__atomic_fetch_or:
    Op = llvm::AtomicRMWInst::Or;
    break;

  case AtomicExpr::AO__atomic_xor_fetch:
    PostOp = llvm::Instruction::Xor;
    // Fall through.
  case AtomicExpr::AO__c11_atomic_fetch_xor:
  case AtomicExpr::AO__atomic_fetch_xor:
    Op = llvm::AtomicRMWInst::Xor;
    break;

  case AtomicExpr::AO__atomic_nand_fetch:
    PostOp = llvm::Instruction::And;
    // Fall through.
  case AtomicExpr::AO__atomic_fetch_nand:
    Op = llvm::AtomicRMWInst::Nand;
    break;
  }

  llvm::LoadInst *LoadVal1 = CGF.Builder.CreateLoad(Val1);
  LoadVal1->setAlignment(Align);
  llvm::AtomicRMWInst *RMWI =
      CGF.Builder.CreateAtomicRMW(Op, Ptr, LoadVal1, Order);
  RMWI->setVolatile(E->isVolatile());

  // For __atomic_*_fetch operations, perform the operation again to
  // determine the value which was written.
  llvm::Value *Result = RMWI;
  if (PostOp)
    Result = CGF.Builder.CreateBinOp(PostOp, RMWI, LoadVal1);
  if (E->getOp() == AtomicExpr::AO__atomic_nand_fetch)
    Result = CGF.Builder.CreateNot(Result);
  llvm::StoreInst *StoreDest = CGF.Builder.CreateStore(Result, Dest);
  StoreDest->setAlignment(Align);
}

// This function emits any expression (scalar, complex, or aggregate)
// into a temporary alloca.
static llvm::Value *
EmitValToTemp(CodeGenFunction &CGF, Expr *E) {
  llvm::Value *DeclPtr = CGF.CreateMemTemp(E->getType(), ".atomictmp");
  CGF.EmitAnyExprToMem(E, DeclPtr, E->getType().getQualifiers(),
                       /*Init*/ true);
  return DeclPtr;
}

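/// Add a call argument for an atomic libcall. For the optimized (sized)
/// libcalls the value is loaded and passed directly as an integer of the
/// atomic's width; the generic libcalls take the value by address as a void
/// pointer instead.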
static void
AddDirectArgument(CodeGenFunction &CGF, CallArgList &Args,
                  bool UseOptimizedLibcall, llvm::Value *Val, QualType ValTy,
                  SourceLocation Loc, CharUnits SizeInChars) {
  if (UseOptimizedLibcall) {
    // Load value and pass it to the function directly.
    unsigned Align = CGF.getContext().getTypeAlignInChars(ValTy).getQuantity();
    int64_t SizeInBits = CGF.getContext().toBits(SizeInChars);
    ValTy =
        CGF.getContext().getIntTypeForBitwidth(SizeInBits, /*Signed=*/false);
    llvm::Type *IPtrTy = llvm::IntegerType::get(CGF.getLLVMContext(),
                                                SizeInBits)->getPointerTo();
    Val = CGF.EmitLoadOfScalar(CGF.Builder.CreateBitCast(Val, IPtrTy), false,
                               Align, CGF.getContext().getPointerType(ValTy),
                               Loc);
    // Coerce the value into an appropriately sized integer type.
    Args.add(RValue::get(Val), ValTy);
  } else {
    // Non-optimized functions always take a reference.
    Args.add(RValue::get(CGF.EmitCastToVoidPtr(Val)),
             CGF.getContext().VoidPtrTy);
  }
}

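/// Emit an atomic expression (the __c11_atomic_* and __atomic_* builtins).
/// Operations whose size matches their alignment and fits within the target's
/// maximum inline atomic width are emitted inline; everything else is lowered
/// to a call into the __atomic_* runtime library.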
RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) {
  QualType AtomicTy = E->getPtr()->getType()->getPointeeType();
  QualType MemTy = AtomicTy;
  if (const AtomicType *AT = AtomicTy->getAs<AtomicType>())
    MemTy = AT->getValueType();
  CharUnits sizeChars = getContext().getTypeSizeInChars(AtomicTy);
  uint64_t Size = sizeChars.getQuantity();
  CharUnits alignChars = getContext().getTypeAlignInChars(AtomicTy);
  unsigned Align = alignChars.getQuantity();
  unsigned MaxInlineWidthInBits =
      getTarget().getMaxAtomicInlineWidth();
  bool UseLibcall = (Size != Align ||
                     getContext().toBits(sizeChars) > MaxInlineWidthInBits);

  llvm::Value *IsWeak = nullptr, *OrderFail = nullptr, *Val1 = nullptr,
              *Val2 = nullptr;
  llvm::Value *Ptr = EmitScalarExpr(E->getPtr());

  if (E->getOp() == AtomicExpr::AO__c11_atomic_init) {
    assert(!Dest && "Init does not return a value");
    LValue lvalue = LValue::MakeAddr(Ptr, AtomicTy, alignChars, getContext());
    EmitAtomicInit(E->getVal1(), lvalue);
    return RValue::get(nullptr);
  }

  llvm::Value *Order = EmitScalarExpr(E->getOrder());

  switch (E->getOp()) {
  case AtomicExpr::AO__c11_atomic_init:
    llvm_unreachable("Already handled!");

  case AtomicExpr::AO__c11_atomic_load:
  case AtomicExpr::AO__atomic_load_n:
    break;

  case AtomicExpr::AO__atomic_load:
    Dest = EmitScalarExpr(E->getVal1());
    break;

  case AtomicExpr::AO__atomic_store:
    Val1 = EmitScalarExpr(E->getVal1());
    break;

  case AtomicExpr::AO__atomic_exchange:
    Val1 = EmitScalarExpr(E->getVal1());
    Dest = EmitScalarExpr(E->getVal2());
    break;

  case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
  case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
  case AtomicExpr::AO__atomic_compare_exchange_n:
  case AtomicExpr::AO__atomic_compare_exchange:
    Val1 = EmitScalarExpr(E->getVal1());
    if (E->getOp() == AtomicExpr::AO__atomic_compare_exchange)
      Val2 = EmitScalarExpr(E->getVal2());
    else
      Val2 = EmitValToTemp(*this, E->getVal2());
    OrderFail = EmitScalarExpr(E->getOrderFail());
    if (E->getNumSubExprs() == 6)
      IsWeak = EmitScalarExpr(E->getWeak());
    break;

  case AtomicExpr::AO__c11_atomic_fetch_add:
  case AtomicExpr::AO__c11_atomic_fetch_sub:
    if (MemTy->isPointerType()) {
      // For pointer arithmetic, we're required to do a bit of math:
      // adding 1 to an int* is not the same as adding 1 to a uintptr_t.
      // ... but only for the C11 builtins. The GNU builtins expect the
      // user to multiply by sizeof(T).
      QualType Val1Ty = E->getVal1()->getType();
      llvm::Value *Val1Scalar = EmitScalarExpr(E->getVal1());
      CharUnits PointeeIncAmt =
          getContext().getTypeSizeInChars(MemTy->getPointeeType());
      Val1Scalar = Builder.CreateMul(Val1Scalar, CGM.getSize(PointeeIncAmt));
      Val1 = CreateMemTemp(Val1Ty, ".atomictmp");
      EmitStoreOfScalar(Val1Scalar, MakeAddrLValue(Val1, Val1Ty));
      break;
    }
    // Fall through.
  case AtomicExpr::AO__atomic_fetch_add:
  case AtomicExpr::AO__atomic_fetch_sub:
  case AtomicExpr::AO__atomic_add_fetch:
  case AtomicExpr::AO__atomic_sub_fetch:
  case AtomicExpr::AO__c11_atomic_store:
  case AtomicExpr::AO__c11_atomic_exchange:
  case AtomicExpr::AO__atomic_store_n:
  case AtomicExpr::AO__atomic_exchange_n:
  case AtomicExpr::AO__c11_atomic_fetch_and:
  case AtomicExpr::AO__c11_atomic_fetch_or:
  case AtomicExpr::AO__c11_atomic_fetch_xor:
  case AtomicExpr::AO__atomic_fetch_and:
  case AtomicExpr::AO__atomic_fetch_or:
  case AtomicExpr::AO__atomic_fetch_xor:
  case AtomicExpr::AO__atomic_fetch_nand:
  case AtomicExpr::AO__atomic_and_fetch:
  case AtomicExpr::AO__atomic_or_fetch:
  case AtomicExpr::AO__atomic_xor_fetch:
  case AtomicExpr::AO__atomic_nand_fetch:
    Val1 = EmitValToTemp(*this, E->getVal1());
    break;
  }

  QualType RValTy = E->getType().getUnqualifiedType();

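  // The result buffer is allocated lazily; operations with a void result type
  // never need one.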
  auto GetDest = [&] {
    if (!RValTy->isVoidType() && !Dest) {
      Dest = CreateMemTemp(RValTy, ".atomicdst");
    }
    return Dest;
  };

  // Use a library call.  See: http://gcc.gnu.org/wiki/Atomic/GCCMM/LIbrary .
  if (UseLibcall) {
    bool UseOptimizedLibcall = false;
    switch (E->getOp()) {
    case AtomicExpr::AO__c11_atomic_fetch_add:
    case AtomicExpr::AO__atomic_fetch_add:
    case AtomicExpr::AO__c11_atomic_fetch_and:
    case AtomicExpr::AO__atomic_fetch_and:
    case AtomicExpr::AO__c11_atomic_fetch_or:
    case AtomicExpr::AO__atomic_fetch_or:
    case AtomicExpr::AO__c11_atomic_fetch_sub:
    case AtomicExpr::AO__atomic_fetch_sub:
    case AtomicExpr::AO__c11_atomic_fetch_xor:
    case AtomicExpr::AO__atomic_fetch_xor:
      // For these, only library calls for certain sizes exist.
      UseOptimizedLibcall = true;
      break;
    default:
      // Only use optimized library calls for sizes for which they exist.
      if (Size == 1 || Size == 2 || Size == 4 || Size == 8)
        UseOptimizedLibcall = true;
      break;
    }

    CallArgList Args;
    if (!UseOptimizedLibcall) {
      // For non-optimized library calls, the size is the first parameter.
      Args.add(RValue::get(llvm::ConstantInt::get(SizeTy, Size)),
               getContext().getSizeType());
    }
    // Atomic address is the first or second parameter.
    Args.add(RValue::get(EmitCastToVoidPtr(Ptr)), getContext().VoidPtrTy);

    std::string LibCallName;
    QualType LoweredMemTy =
        MemTy->isPointerType() ? getContext().getIntPtrType() : MemTy;
    QualType RetTy;
    bool HaveRetTy = false;
    switch (E->getOp()) {
    // There is only one libcall for compare and exchange, because there is no
    // optimisation benefit possible from a libcall version of a weak compare
    // and exchange.
    // bool __atomic_compare_exchange(size_t size, void *mem, void *expected,
    //                                void *desired, int success, int failure)
    // bool __atomic_compare_exchange_N(T *mem, T *expected, T desired,
    //                                  int success, int failure)
    case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
    case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
    case AtomicExpr::AO__atomic_compare_exchange:
    case AtomicExpr::AO__atomic_compare_exchange_n:
      LibCallName = "__atomic_compare_exchange";
      RetTy = getContext().BoolTy;
      HaveRetTy = true;
      Args.add(RValue::get(EmitCastToVoidPtr(Val1)), getContext().VoidPtrTy);
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val2, MemTy,
                        E->getExprLoc(), sizeChars);
      Args.add(RValue::get(Order), getContext().IntTy);
      Order = OrderFail;
      break;
    // void __atomic_exchange(size_t size, void *mem, void *val, void *return,
    //                        int order)
    // T __atomic_exchange_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_exchange:
    case AtomicExpr::AO__atomic_exchange_n:
    case AtomicExpr::AO__atomic_exchange:
      LibCallName = "__atomic_exchange";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
                        E->getExprLoc(), sizeChars);
      break;
    // void __atomic_store(size_t size, void *mem, void *val, int order)
    // void __atomic_store_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_store:
    case AtomicExpr::AO__atomic_store:
    case AtomicExpr::AO__atomic_store_n:
      LibCallName = "__atomic_store";
      RetTy = getContext().VoidTy;
      HaveRetTy = true;
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
                        E->getExprLoc(), sizeChars);
      break;
    // void __atomic_load(size_t size, void *mem, void *return, int order)
    // T __atomic_load_N(T *mem, int order)
    case AtomicExpr::AO__c11_atomic_load:
    case AtomicExpr::AO__atomic_load:
    case AtomicExpr::AO__atomic_load_n:
      LibCallName = "__atomic_load";
      break;
    // T __atomic_fetch_add_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_fetch_add:
    case AtomicExpr::AO__atomic_fetch_add:
      LibCallName = "__atomic_fetch_add";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, LoweredMemTy,
                        E->getExprLoc(), sizeChars);
      break;
    // T __atomic_fetch_and_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_fetch_and:
    case AtomicExpr::AO__atomic_fetch_and:
      LibCallName = "__atomic_fetch_and";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
                        E->getExprLoc(), sizeChars);
      break;
    // T __atomic_fetch_or_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_fetch_or:
    case AtomicExpr::AO__atomic_fetch_or:
      LibCallName = "__atomic_fetch_or";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
                        E->getExprLoc(), sizeChars);
      break;
    // T __atomic_fetch_sub_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_fetch_sub:
    case AtomicExpr::AO__atomic_fetch_sub:
      LibCallName = "__atomic_fetch_sub";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, LoweredMemTy,
                        E->getExprLoc(), sizeChars);
      break;
    // T __atomic_fetch_xor_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_fetch_xor:
    case AtomicExpr::AO__atomic_fetch_xor:
      LibCallName = "__atomic_fetch_xor";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
                        E->getExprLoc(), sizeChars);
      break;
    default: return EmitUnsupportedRValue(E, "atomic library call");
    }

    // Optimized functions have the size in their name.
    if (UseOptimizedLibcall)
      LibCallName += "_" + llvm::utostr(Size);
    // By default, assume we return a value of the atomic type.
    if (!HaveRetTy) {
      if (UseOptimizedLibcall) {
        // Value is returned directly.
        // The function returns an appropriately sized integer type.
        RetTy = getContext().getIntTypeForBitwidth(
            getContext().toBits(sizeChars), /*Signed=*/false);
      } else {
        // Value is returned through parameter before the order.
        RetTy = getContext().VoidTy;
        Args.add(RValue::get(EmitCastToVoidPtr(Dest)), getContext().VoidPtrTy);
      }
    }
    // Order is always the last parameter.
    Args.add(RValue::get(Order),
             getContext().IntTy);

    RValue Res = emitAtomicLibcall(*this, LibCallName, RetTy, Args);
    // The value is returned directly from the libcall.
    if (HaveRetTy && !RetTy->isVoidType())
      return Res;
    // The value is returned via an explicit out param.
    if (RetTy->isVoidType())
      return RValue::get(nullptr);
    // The value is returned directly for optimized libcalls but the caller
    // expects an out-param.
    if (UseOptimizedLibcall) {
      llvm::Value *ResVal = Res.getScalarVal();
      llvm::StoreInst *StoreDest = Builder.CreateStore(
          ResVal,
          Builder.CreateBitCast(GetDest(), ResVal->getType()->getPointerTo()));
      StoreDest->setAlignment(Align);
    }
    return convertTempToRValue(Dest, RValTy, E->getExprLoc());
  }

  bool IsStore = E->getOp() == AtomicExpr::AO__c11_atomic_store ||
                 E->getOp() == AtomicExpr::AO__atomic_store ||
                 E->getOp() == AtomicExpr::AO__atomic_store_n;
  bool IsLoad = E->getOp() == AtomicExpr::AO__c11_atomic_load ||
                E->getOp() == AtomicExpr::AO__atomic_load ||
                E->getOp() == AtomicExpr::AO__atomic_load_n;

  llvm::Type *ITy =
      llvm::IntegerType::get(getLLVMContext(), Size * 8);
  llvm::Value *OrigDest = GetDest();
  Ptr = Builder.CreateBitCast(
      Ptr, ITy->getPointerTo(Ptr->getType()->getPointerAddressSpace()));
  if (Val1) Val1 = Builder.CreateBitCast(Val1, ITy->getPointerTo());
  if (Val2) Val2 = Builder.CreateBitCast(Val2, ITy->getPointerTo());
  if (Dest && !E->isCmpXChg())
    Dest = Builder.CreateBitCast(Dest, ITy->getPointerTo());

  if (isa<llvm::ConstantInt>(Order)) {
    int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
    switch (ord) {
    case AtomicExpr::AO_ABI_memory_order_relaxed:
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
                   Size, Align, llvm::Monotonic);
      break;
    case AtomicExpr::AO_ABI_memory_order_consume:
    case AtomicExpr::AO_ABI_memory_order_acquire:
      if (IsStore)
        break; // Avoid crashing on code with undefined behavior
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
                   Size, Align, llvm::Acquire);
      break;
    case AtomicExpr::AO_ABI_memory_order_release:
      if (IsLoad)
        break; // Avoid crashing on code with undefined behavior
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
                   Size, Align, llvm::Release);
      break;
    case AtomicExpr::AO_ABI_memory_order_acq_rel:
      if (IsLoad || IsStore)
        break; // Avoid crashing on code with undefined behavior
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
                   Size, Align, llvm::AcquireRelease);
      break;
    case AtomicExpr::AO_ABI_memory_order_seq_cst:
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
                   Size, Align, llvm::SequentiallyConsistent);
      break;
    default: // invalid order
      // We should not ever get here normally, but it's hard to
      // enforce that in general.
      break;
    }
    if (RValTy->isVoidType())
      return RValue::get(nullptr);
    return convertTempToRValue(OrigDest, RValTy, E->getExprLoc());
  }

  // Long case, when Order isn't obviously constant.

  // Create all the relevant BB's
  llvm::BasicBlock *MonotonicBB = nullptr, *AcquireBB = nullptr,
                   *ReleaseBB = nullptr, *AcqRelBB = nullptr,
                   *SeqCstBB = nullptr;
  MonotonicBB = createBasicBlock("monotonic", CurFn);
  if (!IsStore)
    AcquireBB = createBasicBlock("acquire", CurFn);
  if (!IsLoad)
    ReleaseBB = createBasicBlock("release", CurFn);
  if (!IsLoad && !IsStore)
    AcqRelBB = createBasicBlock("acqrel", CurFn);
  SeqCstBB = createBasicBlock("seqcst", CurFn);
  llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);

  // Create the switch for the split
  // MonotonicBB is arbitrarily chosen as the default case; in practice, this
  // doesn't matter unless someone is crazy enough to use something that
  // doesn't fold to a constant for the ordering.
  Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
  llvm::SwitchInst *SI = Builder.CreateSwitch(Order, MonotonicBB);

  // Emit all the different atomics
  Builder.SetInsertPoint(MonotonicBB);
  EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
               Size, Align, llvm::Monotonic);
  Builder.CreateBr(ContBB);
  if (!IsStore) {
    Builder.SetInsertPoint(AcquireBB);
    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
                 Size, Align, llvm::Acquire);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32(AtomicExpr::AO_ABI_memory_order_consume),
                AcquireBB);
    SI->addCase(Builder.getInt32(AtomicExpr::AO_ABI_memory_order_acquire),
                AcquireBB);
  }
  if (!IsLoad) {
    Builder.SetInsertPoint(ReleaseBB);
    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
                 Size, Align, llvm::Release);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32(AtomicExpr::AO_ABI_memory_order_release),
                ReleaseBB);
  }
  if (!IsLoad && !IsStore) {
    Builder.SetInsertPoint(AcqRelBB);
    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
                 Size, Align, llvm::AcquireRelease);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32(AtomicExpr::AO_ABI_memory_order_acq_rel),
                AcqRelBB);
  }
  Builder.SetInsertPoint(SeqCstBB);
  EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
               Size, Align, llvm::SequentiallyConsistent);
  Builder.CreateBr(ContBB);
  SI->addCase(Builder.getInt32(AtomicExpr::AO_ABI_memory_order_seq_cst),
              SeqCstBB);

  // Cleanup and return
  Builder.SetInsertPoint(ContBB);
  if (RValTy->isVoidType())
    return RValue::get(nullptr);
  return convertTempToRValue(OrigDest, RValTy, E->getExprLoc());
}

llvm::Value *AtomicInfo::emitCastToAtomicIntPointer(llvm::Value *addr) const {
  unsigned addrspace =
      cast<llvm::PointerType>(addr->getType())->getAddressSpace();
  llvm::IntegerType *ty =
      llvm::IntegerType::get(CGF.getLLVMContext(), AtomicSizeInBits);
  return CGF.Builder.CreateBitCast(addr, ty->getPointerTo(addrspace));
}

RValue AtomicInfo::convertTempToRValue(llvm::Value *addr,
                                       AggValueSlot resultSlot,
                                       SourceLocation loc) const {
  if (EvaluationKind == TEK_Aggregate)
    return resultSlot.asRValue();

  // Drill into the padding structure if we have one.
  if (hasPadding())
    addr = CGF.Builder.CreateStructGEP(addr, 0);

  // Otherwise, just convert the temporary to an r-value using the
  // normal conversion routine.
  return CGF.convertTempToRValue(addr, getValueType(), loc);
}

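/// Convert an integer value loaded from atomic storage back into an r-value
/// of the underlying value type, going through memory only when the value
/// cannot simply be cast.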
RValue AtomicInfo::convertIntToValue(llvm::Value *IntVal,
                                     AggValueSlot ResultSlot,
                                     SourceLocation Loc) const {
  // Try to avoid going through memory in the easy cases.
  assert(IntVal->getType()->isIntegerTy() && "Expected integer value");
  if (getEvaluationKind() == TEK_Scalar && !hasPadding()) {
    auto *ValTy = CGF.ConvertTypeForMem(ValueTy);
    if (ValTy->isIntegerTy()) {
      assert(IntVal->getType() == ValTy && "Different integer types.");
      return RValue::get(IntVal);
    } else if (ValTy->isPointerTy())
      return RValue::get(CGF.Builder.CreateIntToPtr(IntVal, ValTy));
    else if (llvm::CastInst::isBitCastable(IntVal->getType(), ValTy))
      return RValue::get(CGF.Builder.CreateBitCast(IntVal, ValTy));
  }

  // Create a temporary.  This needs to be big enough to hold the
  // atomic integer.
  llvm::Value *Temp;
  bool TempIsVolatile = false;
  CharUnits TempAlignment;
  if (getEvaluationKind() == TEK_Aggregate) {
    assert(!ResultSlot.isIgnored());
    Temp = ResultSlot.getAddr();
    TempAlignment = getValueAlignment();
    TempIsVolatile = ResultSlot.isVolatile();
  } else {
    Temp = CGF.CreateMemTemp(getAtomicType(), "atomic-temp");
    TempAlignment = getAtomicAlignment();
  }

  // Slam the integer into the temporary.
  llvm::Value *CastTemp = emitCastToAtomicIntPointer(Temp);
  CGF.Builder.CreateAlignedStore(IntVal, CastTemp, TempAlignment.getQuantity())
      ->setVolatile(TempIsVolatile);

  return convertTempToRValue(Temp, ResultSlot, Loc);
}

/// Emit a load from an l-value of atomic type.  Note that the r-value
/// we produce is an r-value of the atomic *value* type.
RValue CodeGenFunction::EmitAtomicLoad(LValue src, SourceLocation loc,
                                       AggValueSlot resultSlot) {
  AtomicInfo atomics(*this, src);

  // Check whether we should use a library call.
  if (atomics.shouldUseLibcall()) {
    llvm::Value *tempAddr;
    if (!resultSlot.isIgnored()) {
      assert(atomics.getEvaluationKind() == TEK_Aggregate);
      tempAddr = resultSlot.getAddr();
    } else {
      tempAddr = CreateMemTemp(atomics.getAtomicType(), "atomic-load-temp");
    }

    // void __atomic_load(size_t size, void *mem, void *return, int order);
    CallArgList args;
    args.add(RValue::get(atomics.getAtomicSizeValue()),
             getContext().getSizeType());
    args.add(RValue::get(EmitCastToVoidPtr(src.getAddress())),
             getContext().VoidPtrTy);
    args.add(RValue::get(EmitCastToVoidPtr(tempAddr)),
             getContext().VoidPtrTy);
    args.add(RValue::get(llvm::ConstantInt::get(
                 IntTy, AtomicExpr::AO_ABI_memory_order_seq_cst)),
             getContext().IntTy);
    emitAtomicLibcall(*this, "__atomic_load", getContext().VoidTy, args);

    // Produce the r-value.
    return atomics.convertTempToRValue(tempAddr, resultSlot, loc);
  }

  // Okay, we're doing this natively.
  llvm::Value *addr = atomics.emitCastToAtomicIntPointer(src.getAddress());
  llvm::LoadInst *load = Builder.CreateLoad(addr, "atomic-load");
  load->setAtomic(llvm::SequentiallyConsistent);

  // Other decoration.
  load->setAlignment(src.getAlignment().getQuantity());
  if (src.isVolatileQualified())
    load->setVolatile(true);
  if (src.getTBAAInfo())
    CGM.DecorateInstruction(load, src.getTBAAInfo());

  // If we're ignoring an aggregate return, don't do anything.
  if (atomics.getEvaluationKind() == TEK_Aggregate && resultSlot.isIgnored())
    return RValue::getAggregate(nullptr, false);

  // Okay, turn that back into the original value type.
  return atomics.convertIntToValue(load, resultSlot, loc);
}

/// Copy an r-value into memory as part of storing to an atomic type.
/// This needs to create a bit-pattern suitable for atomic operations.
void AtomicInfo::emitCopyIntoMemory(RValue rvalue, LValue dest) const {
  // If we have an r-value, the rvalue should be of the atomic type,
  // which means that the caller is responsible for having zeroed
  // any padding.  Just do an aggregate copy of that type.
  if (rvalue.isAggregate()) {
    CGF.EmitAggregateCopy(dest.getAddress(),
                          rvalue.getAggregateAddr(),
                          getAtomicType(),
                          (rvalue.isVolatileQualified()
                           || dest.isVolatileQualified()),
                          dest.getAlignment());
    return;
  }

  // Okay, otherwise we're copying stuff.

  // Zero out the buffer if necessary.
  emitMemSetZeroIfNecessary(dest);

  // Drill past the padding if present.
  dest = projectValue(dest);

  // Okay, store the rvalue in.
  if (rvalue.isScalar()) {
    CGF.EmitStoreOfScalar(rvalue.getScalarVal(), dest, /*init*/ true);
  } else {
    CGF.EmitStoreOfComplex(rvalue.getComplexVal(), dest, /*init*/ true);
  }
}

/// Materialize an r-value into memory for the purposes of storing it
/// to an atomic type.
llvm::Value *AtomicInfo::materializeRValue(RValue rvalue) const {
  // Aggregate r-values are already in memory, and EmitAtomicStore
  // requires them to be values of the atomic type.
  if (rvalue.isAggregate())
    return rvalue.getAggregateAddr();

  // Otherwise, make a temporary and materialize into it.
  llvm::Value *temp = CGF.CreateMemTemp(getAtomicType(), "atomic-store-temp");
  LValue tempLV = CGF.MakeAddrLValue(temp, getAtomicType(), getAtomicAlignment());
  emitCopyIntoMemory(rvalue, tempLV);
  return temp;
}

llvm::Value *AtomicInfo::convertRValueToInt(RValue RVal) const {
  // If we've got a scalar value of the right size, try to avoid going
  // through memory.
  if (RVal.isScalar() && !hasPadding()) {
    llvm::Value *Value = RVal.getScalarVal();
    if (isa<llvm::IntegerType>(Value->getType()))
      return Value;
    else {
      llvm::IntegerType *InputIntTy =
          llvm::IntegerType::get(CGF.getLLVMContext(), getValueSizeInBits());
      if (isa<llvm::PointerType>(Value->getType()))
        return CGF.Builder.CreatePtrToInt(Value, InputIntTy);
      else if (llvm::BitCastInst::isBitCastable(Value->getType(), InputIntTy))
        return CGF.Builder.CreateBitCast(Value, InputIntTy);
    }
  }
  // Otherwise, we need to go through memory.
  // Put the r-value in memory.
  llvm::Value *Addr = materializeRValue(RVal);

  // Cast the temporary to the atomic int type and pull a value out.
  Addr = emitCastToAtomicIntPointer(Addr);
  return CGF.Builder.CreateAlignedLoad(Addr,
                                       getAtomicAlignment().getQuantity());
}

/// Emit a store to an l-value of atomic type.
///
/// Note that the r-value is expected to be an r-value *of the atomic
/// type*; this means that for aggregate r-values, it should include
/// storage for any padding that was necessary.
void CodeGenFunction::EmitAtomicStore(RValue rvalue, LValue dest, bool isInit) {
  // If this is an aggregate r-value, it should agree in type except
  // maybe for address-space qualification.
  assert(!rvalue.isAggregate() ||
         rvalue.getAggregateAddr()->getType()->getPointerElementType()
           == dest.getAddress()->getType()->getPointerElementType());

  AtomicInfo atomics(*this, dest);

  // If this is an initialization, just put the value there normally.
  if (isInit) {
    atomics.emitCopyIntoMemory(rvalue, dest);
    return;
  }

  // Check whether we should use a library call.
  if (atomics.shouldUseLibcall()) {
    // Produce a source address.
    llvm::Value *srcAddr = atomics.materializeRValue(rvalue);

    // void __atomic_store(size_t size, void *mem, void *val, int order)
    CallArgList args;
    args.add(RValue::get(atomics.getAtomicSizeValue()),
             getContext().getSizeType());
    args.add(RValue::get(EmitCastToVoidPtr(dest.getAddress())),
             getContext().VoidPtrTy);
    args.add(RValue::get(EmitCastToVoidPtr(srcAddr)),
             getContext().VoidPtrTy);
    args.add(RValue::get(llvm::ConstantInt::get(
                 IntTy, AtomicExpr::AO_ABI_memory_order_seq_cst)),
             getContext().IntTy);
    emitAtomicLibcall(*this, "__atomic_store", getContext().VoidTy, args);
    return;
  }

  // Okay, we're doing this natively.
  llvm::Value *intValue = atomics.convertRValueToInt(rvalue);

  // Do the atomic store.
  llvm::Value *addr = atomics.emitCastToAtomicIntPointer(dest.getAddress());
  llvm::StoreInst *store = Builder.CreateStore(intValue, addr);

  // Initializations don't need to be atomic.
  if (!isInit) store->setAtomic(llvm::SequentiallyConsistent);

  // Other decoration.
  store->setAlignment(dest.getAlignment().getQuantity());
  if (dest.isVolatileQualified())
    store->setVolatile(true);
  if (dest.getTBAAInfo())
    CGM.DecorateInstruction(store, dest.getTBAAInfo());
}

/// Emit a compare-and-exchange operation for an atomic type.
///
std::pair<RValue, RValue> CodeGenFunction::EmitAtomicCompareExchange(
    LValue Obj, RValue Expected, RValue Desired, SourceLocation Loc,
    llvm::AtomicOrdering Success, llvm::AtomicOrdering Failure, bool IsWeak,
    AggValueSlot Slot) {
  // If this is an aggregate r-value, it should agree in type except
  // maybe for address-space qualification.
  assert(!Expected.isAggregate() ||
         Expected.getAggregateAddr()->getType()->getPointerElementType() ==
             Obj.getAddress()->getType()->getPointerElementType());
  assert(!Desired.isAggregate() ||
         Desired.getAggregateAddr()->getType()->getPointerElementType() ==
             Obj.getAddress()->getType()->getPointerElementType());
  AtomicInfo Atomics(*this, Obj);

  if (Failure >= Success)
    // Don't assert on undefined behavior.
    Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(Success);

  auto Alignment = Atomics.getValueAlignment();
  // Check whether we should use a library call.
  if (Atomics.shouldUseLibcall()) {
    auto *ExpectedAddr = Atomics.materializeRValue(Expected);
    // Produce a source address.
    auto *DesiredAddr = Atomics.materializeRValue(Desired);
    // bool __atomic_compare_exchange(size_t size, void *obj, void *expected,
    //                                void *desired, int success, int failure);
    CallArgList Args;
    Args.add(RValue::get(Atomics.getAtomicSizeValue()),
             getContext().getSizeType());
    Args.add(RValue::get(EmitCastToVoidPtr(Obj.getAddress())),
             getContext().VoidPtrTy);
    Args.add(RValue::get(EmitCastToVoidPtr(ExpectedAddr)),
             getContext().VoidPtrTy);
    Args.add(RValue::get(EmitCastToVoidPtr(DesiredAddr)),
             getContext().VoidPtrTy);
    Args.add(RValue::get(llvm::ConstantInt::get(IntTy, Success)),
             getContext().IntTy);
    Args.add(RValue::get(llvm::ConstantInt::get(IntTy, Failure)),
             getContext().IntTy);
    auto SuccessFailureRVal = emitAtomicLibcall(
        *this, "__atomic_compare_exchange", getContext().BoolTy, Args);
    auto *PreviousVal =
        Builder.CreateAlignedLoad(ExpectedAddr, Alignment.getQuantity());
    return std::make_pair(RValue::get(PreviousVal), SuccessFailureRVal);
  }

  // If we've got a scalar value of the right size, try to avoid going
  // through memory.
  auto *ExpectedIntVal = Atomics.convertRValueToInt(Expected);
  auto *DesiredIntVal = Atomics.convertRValueToInt(Desired);

  // Do the atomic store.
  auto *Addr = Atomics.emitCastToAtomicIntPointer(Obj.getAddress());
  auto *Inst = Builder.CreateAtomicCmpXchg(Addr, ExpectedIntVal, DesiredIntVal,
                                           Success, Failure);
  // Other decoration.
  Inst->setVolatile(Obj.isVolatileQualified());
  Inst->setWeak(IsWeak);

  // Okay, turn that back into the original value type.
  auto *PreviousVal = Builder.CreateExtractValue(Inst, /*Idxs=*/0);
  auto *SuccessFailureVal = Builder.CreateExtractValue(Inst, /*Idxs=*/1);
  return std::make_pair(Atomics.convertIntToValue(PreviousVal, Slot, Loc),
                        RValue::get(SuccessFailureVal));
}

void CodeGenFunction::EmitAtomicInit(Expr *init, LValue dest) {
  AtomicInfo atomics(*this, dest);

  switch (atomics.getEvaluationKind()) {
  case TEK_Scalar: {
    llvm::Value *value = EmitScalarExpr(init);
    atomics.emitCopyIntoMemory(RValue::get(value), dest);
    return;
  }

  case TEK_Complex: {
    ComplexPairTy value = EmitComplexExpr(init);
    atomics.emitCopyIntoMemory(RValue::getComplex(value), dest);
    return;
  }

  case TEK_Aggregate: {
    // Fix up the destination if the initializer isn't an expression
    // of atomic type.
    bool Zeroed = false;
    if (!init->getType()->isAtomicType()) {
      Zeroed = atomics.emitMemSetZeroIfNecessary(dest);
      dest = atomics.projectValue(dest);
    }

    // Evaluate the expression directly into the destination.
    AggValueSlot slot = AggValueSlot::forLValue(dest,
                                        AggValueSlot::IsNotDestructed,
                                        AggValueSlot::DoesNotNeedGCBarriers,
                                        AggValueSlot::IsNotAliased,
                                        Zeroed ? AggValueSlot::IsZeroed :
                                                 AggValueSlot::IsNotZeroed);

    EmitAggExpr(init, slot);
    return;
  }
  }
  llvm_unreachable("bad evaluation kind");
}