Automatic Reference Counting.
Language-design credit goes to a lot of people, but I particularly want
to single out Blaine Garst and Patrick Beard for their contributions.
Compiler implementation credit goes to Argyrios, Doug, Fariborz, and myself,
in no particular order.
git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@133103 91177308-0d34-0410-b5e6-96231b3b80d8
diff --git a/lib/CodeGen/CGBlocks.cpp b/lib/CodeGen/CGBlocks.cpp
index e5da703..ac47034 100644
--- a/lib/CodeGen/CGBlocks.cpp
+++ b/lib/CodeGen/CGBlocks.cpp
@@ -347,13 +347,23 @@
continue;
}
- // Block pointers require copy/dispose.
- if (variable->getType()->isBlockPointerType()) {
- info.NeedsCopyDispose = true;
+ // If we have a lifetime qualifier, honor it for capture purposes.
+ // That includes *not* copying it if it's __unsafe_unretained.
+ if (Qualifiers::ObjCLifetime lifetime
+ = variable->getType().getObjCLifetime()) {
+ switch (lifetime) {
+ case Qualifiers::OCL_None: llvm_unreachable("impossible");
+ case Qualifiers::OCL_ExplicitNone:
+ case Qualifiers::OCL_Autoreleasing:
+ break;
- // So do Objective-C pointers.
- } else if (variable->getType()->isObjCObjectPointerType() ||
- C.isObjCNSObjectType(variable->getType())) {
+ case Qualifiers::OCL_Strong:
+ case Qualifiers::OCL_Weak:
+ info.NeedsCopyDispose = true;
+ }
+
+ // Block pointers require copy/dispose. So do Objective-C pointers.
+ } else if (variable->getType()->isObjCRetainableType()) {
info.NeedsCopyDispose = true;
// So do types that require non-trivial copy construction.
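The capture rule above reduces to a small decision table. A minimal
standalone sketch, with a hypothetical Lifetime enum standing in for
Qualifiers::ObjCLifetime:

    #include <cassert>

    enum class Lifetime { None, ExplicitNone, Strong, Weak, Autoreleasing };

    // True if capturing a variable of this lifetime forces the block to
    // carry copy/dispose helpers.
    bool captureNeedsCopyDispose(Lifetime lt) {
      switch (lt) {
      case Lifetime::None:          // unqualified types take the other path
        assert(false && "impossible");
        return false;
      case Lifetime::ExplicitNone:  // __unsafe_unretained: bitwise copy only
      case Lifetime::Autoreleasing:
        return false;
      case Lifetime::Strong:        // retained on copy, released on dispose
      case Lifetime::Weak:          // weak-copied on copy, destroyed on dispose
        return true;
      }
      return false;
    }

    int main() {
      assert(captureNeedsCopyDispose(Lifetime::Weak));
      assert(!captureNeedsCopyDispose(Lifetime::ExplicitNone));
    }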
@@ -591,6 +601,11 @@
// Otherwise, fake up a POD copy into the block field.
} else {
+ // Fake up a new variable so that EmitScalarInit doesn't think
+ // we're referring to the variable in its own initializer.
+ ImplicitParamDecl blockFieldPseudoVar(/*DC*/ 0, SourceLocation(),
+ /*name*/ 0, type);
+
// We use one of these or the other depending on whether the
// reference is nested.
DeclRefExpr notNested(const_cast<VarDecl*>(variable), type, VK_LValue,
@@ -603,15 +618,29 @@
ImplicitCastExpr l2r(ImplicitCastExpr::OnStack, type, CK_LValueToRValue,
declRef, VK_RValue);
- EmitExprAsInit(&l2r, variable, blockField,
+ EmitExprAsInit(&l2r, &blockFieldPseudoVar, blockField,
getContext().getDeclAlign(variable),
/*captured by init*/ false);
}
// Push a destructor if necessary. The semantics for when this
// actually gets run are really obscure.
- if (!ci->isByRef() && CGM.getLangOptions().CPlusPlus)
- PushDestructorCleanup(type, blockField);
+ if (!ci->isByRef()) {
+ switch (type.isDestructedType()) {
+ case QualType::DK_none:
+ break;
+ case QualType::DK_cxx_destructor:
+ PushDestructorCleanup(type, blockField);
+ break;
+ case QualType::DK_objc_strong_lifetime:
+ PushARCReleaseCleanup(getARCCleanupKind(), type, blockField, false);
+ break;
+ case QualType::DK_objc_weak_lifetime:
+ // __weak objects on the stack always get EH cleanups.
+ PushARCWeakReleaseCleanup(NormalAndEHCleanup, type, blockField);
+ break;
+ }
+ }
}
// Cast to the converted block-pointer type, which happens (somewhat
@@ -1023,8 +1052,6 @@
-
-
llvm::Constant *
CodeGenFunction::GenerateCopyHelperFunction(const CGBlockInfo &blockInfo) {
ASTContext &C = getContext();
@@ -1084,21 +1111,40 @@
if (capture.isConstant()) continue;
const Expr *copyExpr = ci->getCopyExpr();
- unsigned flags = 0;
+ BlockFieldFlags flags;
+
+ bool isARCWeakCapture = false;
if (copyExpr) {
assert(!ci->isByRef());
// don't bother computing flags
+
} else if (ci->isByRef()) {
flags = BLOCK_FIELD_IS_BYREF;
- if (type.isObjCGCWeak()) flags |= BLOCK_FIELD_IS_WEAK;
- } else if (type->isBlockPointerType()) {
- flags = BLOCK_FIELD_IS_BLOCK;
- } else if (type->isObjCObjectPointerType() || C.isObjCNSObjectType(type)) {
- flags = BLOCK_FIELD_IS_OBJECT;
- }
+ if (type.isObjCGCWeak())
+ flags |= BLOCK_FIELD_IS_WEAK;
- if (!copyExpr && !flags) continue;
+ } else if (type->isObjCRetainableType()) {
+ flags = BLOCK_FIELD_IS_OBJECT;
+ if (type->isBlockPointerType())
+ flags = BLOCK_FIELD_IS_BLOCK;
+
+ // Special rules for ARC captures:
+ if (getLangOptions().ObjCAutoRefCount) {
+ Qualifiers qs = type.getQualifiers();
+
+ // Don't generate special copy logic for a captured object
+ // unless it's __strong or __weak.
+ if (!qs.hasStrongOrWeakObjCLifetime())
+ continue;
+
+ // Support __weak direct captures.
+ if (qs.getObjCLifetime() == Qualifiers::OCL_Weak)
+ isARCWeakCapture = true;
+ }
+ } else {
+ continue;
+ }
unsigned index = capture.getIndex();
llvm::Value *srcField = Builder.CreateStructGEP(src, index);
@@ -1107,12 +1153,14 @@
// If there's an explicit copy expression, we do that.
if (copyExpr) {
EmitSynthesizedCXXCopyCtor(dstField, srcField, copyExpr);
+ } else if (isARCWeakCapture) {
+ EmitARCCopyWeak(dstField, srcField);
} else {
llvm::Value *srcValue = Builder.CreateLoad(srcField, "blockcopy.src");
srcValue = Builder.CreateBitCast(srcValue, VoidPtrTy);
llvm::Value *dstAddr = Builder.CreateBitCast(dstField, VoidPtrTy);
Builder.CreateCall3(CGM.getBlockObjectAssign(), dstAddr, srcValue,
- llvm::ConstantInt::get(Int32Ty, flags));
+ llvm::ConstantInt::get(Int32Ty, flags.getBitMask()));
}
}
@@ -1176,20 +1224,37 @@
BlockFieldFlags flags;
const CXXDestructorDecl *dtor = 0;
+ bool isARCWeakCapture = false;
+
if (ci->isByRef()) {
flags = BLOCK_FIELD_IS_BYREF;
- if (type.isObjCGCWeak()) flags |= BLOCK_FIELD_IS_WEAK;
- } else if (type->isBlockPointerType()) {
- flags = BLOCK_FIELD_IS_BLOCK;
- } else if (type->isObjCObjectPointerType() || C.isObjCNSObjectType(type)) {
+ if (type.isObjCGCWeak())
+ flags |= BLOCK_FIELD_IS_WEAK;
+ } else if (const CXXRecordDecl *record = type->getAsCXXRecordDecl()) {
+ if (record->hasTrivialDestructor())
+ continue;
+ dtor = record->getDestructor();
+ } else if (type->isObjCRetainableType()) {
flags = BLOCK_FIELD_IS_OBJECT;
- } else if (C.getLangOptions().CPlusPlus) {
- if (const CXXRecordDecl *record = type->getAsCXXRecordDecl())
- if (!record->hasTrivialDestructor())
- dtor = record->getDestructor();
- }
+ if (type->isBlockPointerType())
+ flags = BLOCK_FIELD_IS_BLOCK;
- if (!dtor && flags.empty()) continue;
+ // Special rules for ARC captures.
+ if (getLangOptions().ObjCAutoRefCount) {
+ Qualifiers qs = type.getQualifiers();
+
+ // Don't generate special dispose logic for a captured object
+ // unless it's __strong or __weak.
+ if (!qs.hasStrongOrWeakObjCLifetime())
+ continue;
+
+ // Support __weak direct captures.
+ if (qs.getObjCLifetime() == Qualifiers::OCL_Weak)
+ isARCWeakCapture = true;
+ }
+ } else {
+ continue;
+ }
unsigned index = capture.getIndex();
llvm::Value *srcField = Builder.CreateStructGEP(src, index);
@@ -1198,6 +1263,10 @@
if (dtor) {
PushDestructorCleanup(dtor, srcField);
+ // If this is a __weak capture, emit the release directly.
+ } else if (isARCWeakCapture) {
+ EmitARCDestroyWeak(srcField);
+
// Otherwise we call _Block_object_dispose. It wouldn't be too
// hard to just emit this as a cleanup if we wanted to make sure
// that things were done in reverse.
@@ -1251,6 +1320,55 @@
}
};
+/// Emits the copy/dispose helpers for an ARC __block __weak variable.
+class ARCWeakByrefHelpers : public CodeGenModule::ByrefHelpers {
+public:
+ ARCWeakByrefHelpers(CharUnits alignment) : ByrefHelpers(alignment) {}
+
+ void emitCopy(CodeGenFunction &CGF, llvm::Value *destField,
+ llvm::Value *srcField) {
+ CGF.EmitARCMoveWeak(destField, srcField);
+ }
+
+ void emitDispose(CodeGenFunction &CGF, llvm::Value *field) {
+ CGF.EmitARCDestroyWeak(field);
+ }
+
+ void profileImpl(llvm::FoldingSetNodeID &id) const {
+ // 0 is distinguishable from all pointers and byref flags
+ id.AddInteger(0);
+ }
+};
+
+/// Emits the copy/dispose helpers for an ARC __block __strong variable
+/// that's not of block-pointer type.
+class ARCStrongByrefHelpers : public CodeGenModule::ByrefHelpers {
+public:
+ ARCStrongByrefHelpers(CharUnits alignment) : ByrefHelpers(alignment) {}
+
+ void emitCopy(CodeGenFunction &CGF, llvm::Value *destField,
+ llvm::Value *srcField) {
+ // Do a "move" by copying the value and then zeroing out the old
+ // variable.
+
+ llvm::Value *value = CGF.Builder.CreateLoad(srcField);
+ llvm::Value *null =
+ llvm::ConstantPointerNull::get(cast<llvm::PointerType>(value->getType()));
+ CGF.Builder.CreateStore(value, destField);
+ CGF.Builder.CreateStore(null, srcField);
+ }
+
+ void emitDispose(CodeGenFunction &CGF, llvm::Value *field) {
+ llvm::Value *value = CGF.Builder.CreateLoad(field);
+ CGF.EmitARCRelease(value, /*precise*/ false);
+ }
+
+ void profileImpl(llvm::FoldingSetNodeID &id) const {
+ // 1 is distinguishable from all pointers and byref flags
+ id.AddInteger(1);
+ }
+};
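The "move" in ARCStrongByrefHelpers::emitCopy is easiest to see on plain
pointers. A sketch, assuming a simplified one-field slot in place of the
real byref struct:

    #include <cassert>

    struct ByrefSlot { void *object; };

    // When a __block variable migrates from stack to heap, ownership of
    // its single retain transfers with it: the heap copy takes the value
    // and the stack copy is zeroed so its dispose doesn't over-release.
    void byrefStrongCopy(ByrefSlot *dest, ByrefSlot *src) {
      dest->object = src->object;
      src->object = nullptr;
    }

    int main() {
      int dummy;
      ByrefSlot stack{&dummy}, heap{nullptr};
      byrefStrongCopy(&heap, &stack);
      assert(heap.object == &dummy && stack.object == nullptr);
    }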
+
/// Emits the copy/dispose helpers for a __block variable with a
/// nontrivial copy constructor or destructor.
class CXXByrefHelpers : public CodeGenModule::ByrefHelpers {
@@ -1318,6 +1436,7 @@
SC_Static,
SC_None,
false, true);
+
CGF.StartFunction(FD, R, Fn, FI, args, SourceLocation());
if (byrefInfo.needsCopy()) {
@@ -1449,6 +1568,52 @@
return ::buildByrefHelpers(CGM, byrefType, byrefInfo);
}
+  // Otherwise, if we don't have a retainable type, there's nothing to do.
+ if (!type->isObjCRetainableType()) return 0;
+
+ Qualifiers qs = type.getQualifiers();
+
+ // If we have lifetime, that dominates.
+ if (Qualifiers::ObjCLifetime lifetime = qs.getObjCLifetime()) {
+ assert(getLangOptions().ObjCAutoRefCount);
+
+ switch (lifetime) {
+ case Qualifiers::OCL_None: llvm_unreachable("impossible");
+
+ // These are just bits as far as the runtime is concerned.
+ case Qualifiers::OCL_ExplicitNone:
+ case Qualifiers::OCL_Autoreleasing:
+ return 0;
+
+ // Tell the runtime that this is ARC __weak, called by the
+ // byref routines.
+ case Qualifiers::OCL_Weak: {
+ ARCWeakByrefHelpers byrefInfo(emission.Alignment);
+ return ::buildByrefHelpers(CGM, byrefType, byrefInfo);
+ }
+
+ // ARC __strong __block variables need to be retained.
+ case Qualifiers::OCL_Strong:
+ // Block-pointers need to be _Block_copy'ed, so we let the
+ // runtime be in charge. But we can't use the code below
+ // because we don't want to set BYREF_CALLER, which will
+ // just make the runtime ignore us.
+ if (type->isBlockPointerType()) {
+ BlockFieldFlags flags = BLOCK_FIELD_IS_BLOCK;
+ ObjectByrefHelpers byrefInfo(emission.Alignment, flags);
+ return ::buildByrefHelpers(CGM, byrefType, byrefInfo);
+
+ // Otherwise, we transfer ownership of the retain from the stack
+ // to the heap.
+ } else {
+ ARCStrongByrefHelpers byrefInfo(emission.Alignment);
+ return ::buildByrefHelpers(CGM, byrefType, byrefInfo);
+ }
+ }
+ llvm_unreachable("fell out of lifetime switch!");
+ }
+
BlockFieldFlags flags;
if (type->isBlockPointerType()) {
flags |= BLOCK_FIELD_IS_BLOCK;
@@ -1639,6 +1804,7 @@
CallBlockRelease(llvm::Value *Addr) : Addr(Addr) {}
void Emit(CodeGenFunction &CGF, bool IsForEH) {
+ // Should we be passing FIELD_IS_WEAK here?
CGF.BuildBlockRelease(Addr, BLOCK_FIELD_IS_BYREF);
}
};
diff --git a/lib/CodeGen/CGBlocks.h b/lib/CodeGen/CGBlocks.h
index 9bd18e5..35d61cf 100644
--- a/lib/CodeGen/CGBlocks.h
+++ b/lib/CodeGen/CGBlocks.h
@@ -89,7 +89,7 @@
variable */
BLOCK_FIELD_IS_WEAK = 0x10, /* declared __weak, only used in byref copy
helpers */
-
+ BLOCK_FIELD_IS_ARC = 0x40, /* field has ARC-specific semantics */
BLOCK_BYREF_CALLER = 128, /* called from __block (byref) copy/dispose
support routines */
BLOCK_BYREF_CURRENT_MAX = 256
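These flags compose as a bitmask when a capture is described to the
runtime. A sketch of the composition; the IS_OBJECT/IS_BLOCK/IS_BYREF
values are the standard blocks-ABI constants and are assumed here, since
they fall outside the hunk above:

    #include <cassert>

    enum BlockFieldFlag_t {
      BLOCK_FIELD_IS_OBJECT = 0x03,  // assumed from the blocks ABI
      BLOCK_FIELD_IS_BLOCK  = 0x07,  // assumed from the blocks ABI
      BLOCK_FIELD_IS_BYREF  = 0x08,  // assumed from the blocks ABI
      BLOCK_FIELD_IS_WEAK   = 0x10,
      BLOCK_FIELD_IS_ARC    = 0x40,
      BLOCK_BYREF_CALLER    = 128,
    };

    int main() {
      // A __weak __block capture is described to the runtime as BYREF | WEAK.
      unsigned flags = BLOCK_FIELD_IS_BYREF | BLOCK_FIELD_IS_WEAK;
      assert(flags == 0x18);
    }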
diff --git a/lib/CodeGen/CGCall.cpp b/lib/CodeGen/CGCall.cpp
index 05c11ce..712ec62 100644
--- a/lib/CodeGen/CGCall.cpp
+++ b/lib/CodeGen/CGCall.cpp
@@ -25,6 +25,7 @@
#include "llvm/Attributes.h"
#include "llvm/Support/CallSite.h"
#include "llvm/Target/TargetData.h"
+#include "llvm/InlineAsm.h"
#include "llvm/Transforms/Utils/Local.h"
using namespace clang;
using namespace CodeGen;
@@ -190,13 +191,15 @@
e = MD->param_end(); i != e; ++i) {
ArgTys.push_back(Context.getCanonicalParamType((*i)->getType()));
}
- return getFunctionInfo(GetReturnType(MD->getResultType()),
- ArgTys,
- FunctionType::ExtInfo(
- /*NoReturn*/ false,
- /*HasRegParm*/ false,
- /*RegParm*/ 0,
- getCallingConventionForDecl(MD)));
+
+ FunctionType::ExtInfo einfo;
+ einfo = einfo.withCallingConv(getCallingConventionForDecl(MD));
+
+ if (getContext().getLangOptions().ObjCAutoRefCount &&
+ MD->hasAttr<NSReturnsRetainedAttr>())
+ einfo = einfo.withProducesResult(true);
+
+ return getFunctionInfo(GetReturnType(MD->getResultType()), ArgTys, einfo);
}
const CGFunctionInfo &CodeGenTypes::getFunctionInfo(GlobalDecl GD) {
@@ -262,7 +265,8 @@
return *FI;
// Construct the function info.
- FI = new CGFunctionInfo(CC, Info.getNoReturn(), Info.getHasRegParm(), Info.getRegParm(), ResTy,
+ FI = new CGFunctionInfo(CC, Info.getNoReturn(), Info.getProducesResult(),
+ Info.getHasRegParm(), Info.getRegParm(), ResTy,
ArgTys.data(), ArgTys.size());
FunctionInfos.InsertNode(FI, InsertPos);
@@ -291,13 +295,15 @@
}
CGFunctionInfo::CGFunctionInfo(unsigned _CallingConvention,
- bool _NoReturn, bool _HasRegParm, unsigned _RegParm,
+ bool _NoReturn, bool returnsRetained,
+ bool _HasRegParm, unsigned _RegParm,
CanQualType ResTy,
const CanQualType *ArgTys,
unsigned NumArgTys)
: CallingConvention(_CallingConvention),
EffectiveCallingConvention(_CallingConvention),
- NoReturn(_NoReturn), HasRegParm(_HasRegParm), RegParm(_RegParm)
+ NoReturn(_NoReturn), ReturnsRetained(returnsRetained),
+ HasRegParm(_HasRegParm), RegParm(_RegParm)
{
NumArgs = NumArgTys;
@@ -1068,6 +1074,95 @@
assert(AI == Fn->arg_end() && "Argument mismatch!");
}
+/// Try to emit a fused autorelease of a return result.
+static llvm::Value *tryEmitFusedAutoreleaseOfResult(CodeGenFunction &CGF,
+ llvm::Value *result) {
+  // The result must be the last instruction in the current insertion block.
+ llvm::BasicBlock *BB = CGF.Builder.GetInsertBlock();
+ if (BB->empty()) return 0;
+ if (&BB->back() != result) return 0;
+
+ const llvm::Type *resultType = result->getType();
+
+ // result is in a BasicBlock and is therefore an Instruction.
+ llvm::Instruction *generator = cast<llvm::Instruction>(result);
+
+ llvm::SmallVector<llvm::Instruction*,4> insnsToKill;
+
+ // Look for:
+ // %generator = bitcast %type1* %generator2 to %type2*
+ while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(generator)) {
+ // We would have emitted this as a constant if the operand weren't
+ // an Instruction.
+ generator = cast<llvm::Instruction>(bitcast->getOperand(0));
+
+ // Require the generator to be immediately followed by the cast.
+ if (generator->getNextNode() != bitcast)
+ return 0;
+
+ insnsToKill.push_back(bitcast);
+ }
+
+ // Look for:
+ // %generator = call i8* @objc_retain(i8* %originalResult)
+ // or
+ // %generator = call i8* @objc_retainAutoreleasedReturnValue(i8* %originalResult)
+ llvm::CallInst *call = dyn_cast<llvm::CallInst>(generator);
+ if (!call) return 0;
+
+ bool doRetainAutorelease;
+
+ if (call->getCalledValue() == CGF.CGM.getARCEntrypoints().objc_retain) {
+ doRetainAutorelease = true;
+ } else if (call->getCalledValue() == CGF.CGM.getARCEntrypoints()
+ .objc_retainAutoreleasedReturnValue) {
+ doRetainAutorelease = false;
+
+ // Look for an inline asm immediately preceding the call and kill it, too.
+ llvm::Instruction *prev = call->getPrevNode();
+ if (llvm::CallInst *asmCall = dyn_cast_or_null<llvm::CallInst>(prev))
+ if (asmCall->getCalledValue()
+ == CGF.CGM.getARCEntrypoints().retainAutoreleasedReturnValueMarker)
+ insnsToKill.push_back(prev);
+ } else {
+ return 0;
+ }
+
+ result = call->getArgOperand(0);
+ insnsToKill.push_back(call);
+
+ // Keep killing bitcasts, for sanity. Note that we no longer care
+ // about precise ordering as long as there's exactly one use.
+ while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(result)) {
+ if (!bitcast->hasOneUse()) break;
+ insnsToKill.push_back(bitcast);
+ result = bitcast->getOperand(0);
+ }
+
+ // Delete all the unnecessary instructions, from latest to earliest.
+ for (llvm::SmallVectorImpl<llvm::Instruction*>::iterator
+ i = insnsToKill.begin(), e = insnsToKill.end(); i != e; ++i)
+ (*i)->eraseFromParent();
+
+ // Do the fused retain/autorelease if we were asked to.
+ if (doRetainAutorelease)
+ result = CGF.EmitARCRetainAutoreleaseReturnValue(result);
+
+ // Cast back to the result type.
+ return CGF.Builder.CreateBitCast(result, resultType);
+}
+
+/// Emit an ARC autorelease of the result of a function.
+static llvm::Value *emitAutoreleaseOfResult(CodeGenFunction &CGF,
+ llvm::Value *result) {
+ // At -O0, try to emit a fused retain/autorelease.
+ if (CGF.shouldUseFusedARCCalls())
+ if (llvm::Value *fused = tryEmitFusedAutoreleaseOfResult(CGF, result))
+ return fused;
+
+ return CGF.EmitARCAutoreleaseReturnValue(result);
+}
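The peephole above is a rewrite of the trailing call at function exit. A
toy model of the decision, with an Op list standing in for the
instruction stream (names hypothetical):

    #include <cassert>
    #include <vector>

    enum class Op { Retain, RetainAutoreleasedReturnValue, Other };

    // Decide how to autorelease a function's result given the trailing
    // instruction: a plain retain fuses into one retainAutorelease call;
    // a retain of a callee's autoreleased result cancels against our
    // pending autorelease entirely; anything else gets no fusion.
    bool fuseReturnAutorelease(std::vector<Op> &ops, bool &emitFused) {
      if (ops.empty() || ops.back() == Op::Other)
        return false;                  // fall back to a plain autorelease
      emitFused = (ops.back() == Op::Retain);
      ops.pop_back();                  // the old call is deleted either way
      return true;
    }

    int main() {
      std::vector<Op> ops = {Op::Other, Op::Retain};
      bool fused = false;
      assert(fuseReturnAutorelease(ops, fused) && fused);
    }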
+
void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI) {
// Functions with no result always return void.
if (ReturnValue == 0) {
@@ -1135,6 +1230,16 @@
RV = CreateCoercedLoad(V, RetAI.getCoerceToType(), *this);
}
+
+ // In ARC, end functions that return a retainable type with a call
+ // to objc_autoreleaseReturnValue.
+ if (AutoreleaseResult) {
+ assert(getLangOptions().ObjCAutoRefCount &&
+ !FI.isReturnsRetained() &&
+ RetTy->isObjCRetainableType());
+ RV = emitAutoreleaseOfResult(*this, RV);
+ }
+
break;
case ABIArgInfo::Ignore:
@@ -1184,8 +1289,152 @@
return args.add(RValue::get(value), type);
}
+static bool isProvablyNull(llvm::Value *addr) {
+ return isa<llvm::ConstantPointerNull>(addr);
+}
+
+static bool isProvablyNonNull(llvm::Value *addr) {
+ return isa<llvm::AllocaInst>(addr);
+}
+
+/// Emit the actual writing-back of a writeback.
+static void emitWriteback(CodeGenFunction &CGF,
+ const CallArgList::Writeback &writeback) {
+ llvm::Value *srcAddr = writeback.Address;
+ assert(!isProvablyNull(srcAddr) &&
+ "shouldn't have writeback for provably null argument");
+
+ llvm::BasicBlock *contBB = 0;
+
+ // If the argument wasn't provably non-null, we need to null check
+ // before doing the store.
+ bool provablyNonNull = isProvablyNonNull(srcAddr);
+ if (!provablyNonNull) {
+ llvm::BasicBlock *writebackBB = CGF.createBasicBlock("icr.writeback");
+ contBB = CGF.createBasicBlock("icr.done");
+
+ llvm::Value *isNull = CGF.Builder.CreateIsNull(srcAddr, "icr.isnull");
+ CGF.Builder.CreateCondBr(isNull, contBB, writebackBB);
+ CGF.EmitBlock(writebackBB);
+ }
+
+ // Load the value to writeback.
+ llvm::Value *value = CGF.Builder.CreateLoad(writeback.Temporary);
+
+ // Cast it back, in case we're writing an id to a Foo* or something.
+ value = CGF.Builder.CreateBitCast(value,
+ cast<llvm::PointerType>(srcAddr->getType())->getElementType(),
+ "icr.writeback-cast");
+
+ // Perform the writeback.
+ QualType srcAddrType = writeback.AddressType;
+ CGF.EmitStoreThroughLValue(RValue::get(value),
+ CGF.MakeAddrLValue(srcAddr, srcAddrType),
+ srcAddrType);
+
+ // Jump to the continuation block.
+ if (!provablyNonNull)
+ CGF.EmitBlock(contBB);
+}
+
+static void emitWritebacks(CodeGenFunction &CGF,
+ const CallArgList &args) {
+ for (CallArgList::writeback_iterator
+ i = args.writeback_begin(), e = args.writeback_end(); i != e; ++i)
+ emitWriteback(CGF, *i);
+}
+
+/// Emit an argument that's being passed call-by-writeback. That is,
+/// we are passing the address of
+static void emitWritebackArg(CodeGenFunction &CGF, CallArgList &args,
+ const ObjCIndirectCopyRestoreExpr *CRE) {
+ llvm::Value *srcAddr = CGF.EmitScalarExpr(CRE->getSubExpr());
+
+ // The dest and src types don't necessarily match in LLVM terms
+ // because of the crazy ObjC compatibility rules.
+
+ const llvm::PointerType *destType =
+ cast<llvm::PointerType>(CGF.ConvertType(CRE->getType()));
+
+ // If the address is a constant null, just pass the appropriate null.
+ if (isProvablyNull(srcAddr)) {
+ args.add(RValue::get(llvm::ConstantPointerNull::get(destType)),
+ CRE->getType());
+ return;
+ }
+
+ QualType srcAddrType =
+ CRE->getSubExpr()->getType()->castAs<PointerType>()->getPointeeType();
+
+ // Create the temporary.
+ llvm::Value *temp = CGF.CreateTempAlloca(destType->getElementType(),
+ "icr.temp");
+
+ // Zero-initialize it if we're not doing a copy-initialization.
+ bool shouldCopy = CRE->shouldCopy();
+ if (!shouldCopy) {
+ llvm::Value *null =
+ llvm::ConstantPointerNull::get(
+ cast<llvm::PointerType>(destType->getElementType()));
+ CGF.Builder.CreateStore(null, temp);
+ }
+
+ llvm::BasicBlock *contBB = 0;
+
+ // If the address is *not* known to be non-null, we need to switch.
+ llvm::Value *finalArgument;
+
+ bool provablyNonNull = isProvablyNonNull(srcAddr);
+ if (provablyNonNull) {
+ finalArgument = temp;
+ } else {
+ llvm::Value *isNull = CGF.Builder.CreateIsNull(srcAddr, "icr.isnull");
+
+ finalArgument = CGF.Builder.CreateSelect(isNull,
+ llvm::ConstantPointerNull::get(destType),
+ temp, "icr.argument");
+
+ // If we need to copy, then the load has to be conditional, which
+ // means we need control flow.
+ if (shouldCopy) {
+ contBB = CGF.createBasicBlock("icr.cont");
+ llvm::BasicBlock *copyBB = CGF.createBasicBlock("icr.copy");
+ CGF.Builder.CreateCondBr(isNull, contBB, copyBB);
+ CGF.EmitBlock(copyBB);
+ }
+ }
+
+ // Perform a copy if necessary.
+ if (shouldCopy) {
+ LValue srcLV = CGF.MakeAddrLValue(srcAddr, srcAddrType);
+ RValue srcRV = CGF.EmitLoadOfLValue(srcLV, srcAddrType);
+ assert(srcRV.isScalar());
+
+ llvm::Value *src = srcRV.getScalarVal();
+ src = CGF.Builder.CreateBitCast(src, destType->getElementType(),
+ "icr.cast");
+
+ // Use an ordinary store, not a store-to-lvalue.
+ CGF.Builder.CreateStore(src, temp);
+ }
+
+ // Finish the control flow if we needed it.
+ if (shouldCopy && !provablyNonNull)
+ CGF.EmitBlock(contBB);
+
+ args.addWriteback(srcAddr, srcAddrType, temp);
+ args.add(RValue::get(finalArgument), CRE->getType());
+}
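Ignoring the shouldCopy and null-select subtleties, the call-by-writeback
sequence amounts to: copy in, call with the temporary, write back. A
sketch with hypothetical names:

    #include <cassert>

    // Hypothetical callee that may replace the pointed-to object, the way
    // an ObjC method with an out-parameter would.
    static int replacement = 42;
    void callee(int **out) { if (out) *out = &replacement; }

    void callWithWriteback(int **userAddr) {
      if (!userAddr) { callee(nullptr); return; }  // provably-null argument
      int *temp = *userAddr;                       // icr.temp: copy in
      callee(&temp);                               // pass the temporary
      *userAddr = temp;                            // icr.writeback
    }

    int main() {
      int original = 7;
      int *p = &original;
      callWithWriteback(&p);
      assert(p == &replacement);
    }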
+
void CodeGenFunction::EmitCallArg(CallArgList &args, const Expr *E,
QualType type) {
+ if (const ObjCIndirectCopyRestoreExpr *CRE
+ = dyn_cast<ObjCIndirectCopyRestoreExpr>(E)) {
+ assert(getContext().getLangOptions().ObjCAutoRefCount);
+ assert(getContext().hasSameType(E->getType(), type));
+ return emitWritebackArg(*this, args, CRE);
+ }
+
if (type->isReferenceType())
return args.add(EmitReferenceBindingToExpr(E, /*InitializedDecl=*/0),
type);
@@ -1435,6 +1684,11 @@
if (Builder.isNamePreserving() && !CI->getType()->isVoidTy())
CI->setName("call");
+ // Emit any writebacks immediately. Arguably this should happen
+ // after any return-value munging.
+ if (CallArgs.hasWritebacks())
+ emitWritebacks(*this, CallArgs);
+
switch (RetAI.getKind()) {
case ABIArgInfo::Indirect: {
unsigned Alignment = getContext().getTypeAlignInChars(RetTy).getQuantity();
diff --git a/lib/CodeGen/CGCall.h b/lib/CodeGen/CGCall.h
index 160a62e..343b944 100644
--- a/lib/CodeGen/CGCall.h
+++ b/lib/CodeGen/CGCall.h
@@ -58,9 +58,44 @@
class CallArgList :
public llvm::SmallVector<CallArg, 16> {
public:
+ struct Writeback {
+ /// The original argument.
+ llvm::Value *Address;
+
+ /// The pointee type of the original argument.
+ QualType AddressType;
+
+ /// The temporary alloca.
+ llvm::Value *Temporary;
+ };
+
void add(RValue rvalue, QualType type, bool needscopy = false) {
push_back(CallArg(rvalue, type, needscopy));
}
+
+ void addFrom(const CallArgList &other) {
+ insert(end(), other.begin(), other.end());
+ Writebacks.insert(Writebacks.end(),
+ other.Writebacks.begin(), other.Writebacks.end());
+ }
+
+ void addWriteback(llvm::Value *address, QualType addressType,
+ llvm::Value *temporary) {
+ Writeback writeback;
+ writeback.Address = address;
+ writeback.AddressType = addressType;
+ writeback.Temporary = temporary;
+ Writebacks.push_back(writeback);
+ }
+
+ bool hasWritebacks() const { return !Writebacks.empty(); }
+
+ typedef llvm::SmallVectorImpl<Writeback>::const_iterator writeback_iterator;
+ writeback_iterator writeback_begin() const { return Writebacks.begin(); }
+ writeback_iterator writeback_end() const { return Writebacks.end(); }
+
+ private:
+ llvm::SmallVector<Writeback, 1> Writebacks;
};
/// FunctionArgList - Type for representing both the decl and type
@@ -88,6 +123,9 @@
/// Whether this function is noreturn.
bool NoReturn;
+ /// Whether this function is returns-retained.
+ bool ReturnsRetained;
+
unsigned NumArgs;
ArgInfo *Args;
@@ -100,7 +138,8 @@
typedef ArgInfo *arg_iterator;
CGFunctionInfo(unsigned CallingConvention, bool NoReturn,
- bool HasRegParm, unsigned RegParm, CanQualType ResTy,
+ bool ReturnsRetained, bool HasRegParm, unsigned RegParm,
+ CanQualType ResTy,
const CanQualType *ArgTys, unsigned NumArgTys);
~CGFunctionInfo() { delete[] Args; }
@@ -113,6 +152,10 @@
bool isNoReturn() const { return NoReturn; }
+  /// In ARC, whether this function retains its return value.  This
+ /// is not always reliable for call sites.
+ bool isReturnsRetained() const { return ReturnsRetained; }
+
/// getCallingConvention - Return the user specified calling
/// convention.
unsigned getCallingConvention() const { return CallingConvention; }
@@ -137,6 +180,7 @@
void Profile(llvm::FoldingSetNodeID &ID) {
ID.AddInteger(getCallingConvention());
ID.AddBoolean(NoReturn);
+ ID.AddBoolean(ReturnsRetained);
ID.AddBoolean(HasRegParm);
ID.AddInteger(RegParm);
getReturnType().Profile(ID);
@@ -151,6 +195,7 @@
Iterator end) {
ID.AddInteger(Info.getCC());
ID.AddBoolean(Info.getNoReturn());
+ ID.AddBoolean(Info.getProducesResult());
ID.AddBoolean(Info.getHasRegParm());
ID.AddInteger(Info.getRegParm());
ResTy.Profile(ID);
diff --git a/lib/CodeGen/CGClass.cpp b/lib/CodeGen/CGClass.cpp
index 5725d80..066f0d5 100644
--- a/lib/CodeGen/CGClass.cpp
+++ b/lib/CodeGen/CGClass.cpp
@@ -398,7 +398,8 @@
BaseClassDecl,
isBaseVirtual);
- AggValueSlot AggSlot = AggValueSlot::forAddr(V, false, /*Lifetime*/ true);
+ AggValueSlot AggSlot = AggValueSlot::forAddr(V, Qualifiers(),
+ /*Lifetime*/ true);
CGF.EmitAggExpr(BaseInit->getInit(), AggSlot);
@@ -428,10 +429,20 @@
CGF.Builder.CreateStore(Next, ArrayIndexVar);
}
- AggValueSlot Slot = AggValueSlot::forAddr(Dest, LHS.isVolatileQualified(),
- /*Lifetime*/ true);
-
- CGF.EmitAggExpr(MemberInit->getInit(), Slot);
+ if (!CGF.hasAggregateLLVMType(T)) {
+ CGF.EmitScalarInit(MemberInit->getInit(), 0, Dest, false,
+ LHS.isVolatileQualified(),
+ CGF.getContext().getTypeAlign(T),
+ T);
+ } else if (T->isAnyComplexType()) {
+ CGF.EmitComplexExprIntoAddr(MemberInit->getInit(), Dest,
+ LHS.isVolatileQualified());
+ } else {
+ AggValueSlot Slot = AggValueSlot::forAddr(Dest, LHS.getQuals(),
+ /*Lifetime*/ true);
+
+ CGF.EmitAggExpr(MemberInit->getInit(), Slot);
+ }
return;
}
@@ -540,15 +551,16 @@
// FIXME: If there's no initializer and the CXXCtorInitializer
// was implicitly generated, we shouldn't be zeroing memory.
- RValue RHS;
- if (FieldType->isReferenceType()) {
- RHS = CGF.EmitReferenceBindingToExpr(MemberInit->getInit(), Field);
- CGF.EmitStoreThroughLValue(RHS, LHS, FieldType);
- } else if (FieldType->isArrayType() && !MemberInit->getInit()) {
+ if (FieldType->isArrayType() && !MemberInit->getInit()) {
CGF.EmitNullInitialization(LHS.getAddress(), Field->getType());
} else if (!CGF.hasAggregateLLVMType(Field->getType())) {
- RHS = RValue::get(CGF.EmitScalarExpr(MemberInit->getInit()));
- CGF.EmitStoreThroughLValue(RHS, LHS, FieldType);
+ if (LHS.isSimple()) {
+ CGF.EmitExprAsInit(MemberInit->getInit(), Field, LHS.getAddress(),
+ CGF.getContext().getDeclAlign(Field), false);
+ } else {
+ RValue RHS = RValue::get(CGF.EmitScalarExpr(MemberInit->getInit()));
+ CGF.EmitStoreThroughLValue(RHS, LHS, FieldType);
+ }
} else if (MemberInit->getInit()->getType()->isAnyComplexType()) {
CGF.EmitComplexExprIntoAddr(MemberInit->getInit(), LHS.getAddress(),
LHS.isVolatileQualified());
@@ -576,11 +588,11 @@
llvm::Value *Zero = llvm::Constant::getNullValue(SizeTy);
CGF.Builder.CreateStore(Zero, ArrayIndexVar);
- // If we are copying an array of scalars or classes with trivial copy
+ // If we are copying an array of PODs or classes with trivial copy
// constructors, perform a single aggregate copy.
- const RecordType *Record = BaseElementTy->getAs<RecordType>();
- if (!Record ||
- cast<CXXRecordDecl>(Record->getDecl())->hasTrivialCopyConstructor()) {
+ const CXXRecordDecl *Record = BaseElementTy->getAsCXXRecordDecl();
+ if (BaseElementTy.isPODType(CGF.getContext()) ||
+ (Record && Record->hasTrivialCopyConstructor())) {
// Find the source pointer. We know it's the last argument because
// we know we're in a copy constructor.
unsigned SrcArgIndex = Args.size() - 1;
@@ -925,12 +937,8 @@
CallArrayFieldDtor(const FieldDecl *Field) : Field(Field) {}
void Emit(CodeGenFunction &CGF, bool IsForEH) {
- QualType FieldType = Field->getType();
- const ConstantArrayType *Array =
- CGF.getContext().getAsConstantArrayType(FieldType);
-
- QualType BaseType =
- CGF.getContext().getBaseElementType(Array->getElementType());
+ QualType FieldType = Field->getType();
+ QualType BaseType = CGF.getContext().getBaseElementType(FieldType);
const CXXRecordDecl *FieldClassDecl = BaseType->getAsCXXRecordDecl();
llvm::Value *ThisPtr = CGF.LoadCXXThis();
@@ -938,9 +946,12 @@
// FIXME: Qualifiers?
/*CVRQualifiers=*/0);
- const llvm::Type *BasePtr = CGF.ConvertType(BaseType)->getPointerTo();
- llvm::Value *BaseAddrPtr =
- CGF.Builder.CreateBitCast(LHS.getAddress(), BasePtr);
+ const llvm::Type *BasePtr
+ = CGF.ConvertType(BaseType)->getPointerTo();
+ llvm::Value *BaseAddrPtr
+ = CGF.Builder.CreateBitCast(LHS.getAddress(), BasePtr);
+ const ConstantArrayType *Array
+ = CGF.getContext().getAsConstantArrayType(FieldType);
CGF.EmitCXXAggrDestructorCall(FieldClassDecl->getDestructor(),
Array, BaseAddrPtr);
}
@@ -1042,19 +1053,26 @@
getContext().getAsConstantArrayType(FieldType);
if (Array)
FieldType = getContext().getBaseElementType(Array->getElementType());
-
- const RecordType *RT = FieldType->getAs<RecordType>();
- if (!RT)
- continue;
-
- CXXRecordDecl *FieldClassDecl = cast<CXXRecordDecl>(RT->getDecl());
- if (FieldClassDecl->hasTrivialDestructor())
- continue;
- if (Array)
- EHStack.pushCleanup<CallArrayFieldDtor>(NormalAndEHCleanup, Field);
- else
- EHStack.pushCleanup<CallFieldDtor>(NormalAndEHCleanup, Field);
+ switch (FieldType.isDestructedType()) {
+ case QualType::DK_none:
+ continue;
+
+ case QualType::DK_cxx_destructor:
+ if (Array)
+ EHStack.pushCleanup<CallArrayFieldDtor>(NormalAndEHCleanup, Field);
+ else
+ EHStack.pushCleanup<CallFieldDtor>(NormalAndEHCleanup, Field);
+ break;
+
+ case QualType::DK_objc_strong_lifetime:
+ PushARCFieldReleaseCleanup(getARCCleanupKind(), Field);
+ break;
+
+ case QualType::DK_objc_weak_lifetime:
+ PushARCFieldWeakReleaseCleanup(getARCCleanupKind(), Field);
+ break;
+ }
}
}
@@ -1384,7 +1402,8 @@
llvm::Value *ThisPtr = LoadCXXThis();
- AggValueSlot AggSlot = AggValueSlot::forAddr(ThisPtr, false, /*Lifetime*/ true);
+ AggValueSlot AggSlot =
+ AggValueSlot::forAddr(ThisPtr, Qualifiers(), /*Lifetime*/ true);
EmitAggExpr(Ctor->init_begin()[0]->getInit(), AggSlot);
diff --git a/lib/CodeGen/CGDebugInfo.cpp b/lib/CodeGen/CGDebugInfo.cpp
index 98d30db..88cc5be 100644
--- a/lib/CodeGen/CGDebugInfo.cpp
+++ b/lib/CodeGen/CGDebugInfo.cpp
@@ -389,6 +389,7 @@
// Ignore these qualifiers for now.
Qc.removeObjCGCAttr();
Qc.removeAddressSpace();
+ Qc.removeObjCLifetime();
// We will create one Derived type for one qualifier and recurse to handle any
// additional ones.
diff --git a/lib/CodeGen/CGDecl.cpp b/lib/CodeGen/CGDecl.cpp
index 33c4ada..14e999f 100644
--- a/lib/CodeGen/CGDecl.cpp
+++ b/lib/CodeGen/CGDecl.cpp
@@ -353,9 +353,7 @@
if (NRVO) CGF.EmitBlock(SkipDtorBB);
}
};
-}
-namespace {
struct CallStackRestore : EHScopeStack::Cleanup {
llvm::Value *Stack;
CallStackRestore(llvm::Value *Stack) : Stack(Stack) {}
@@ -400,6 +398,164 @@
};
}
+/// EmitAutoVarWithLifetime - Does the setup required for an automatic
+/// variable with lifetime.
+static void EmitAutoVarWithLifetime(CodeGenFunction &CGF, const VarDecl &var,
+ llvm::Value *addr,
+ Qualifiers::ObjCLifetime lifetime) {
+ switch (lifetime) {
+ case Qualifiers::OCL_None:
+ llvm_unreachable("present but none");
+
+ case Qualifiers::OCL_ExplicitNone:
+ // nothing to do
+ break;
+
+ case Qualifiers::OCL_Strong: {
+ CGF.PushARCReleaseCleanup(CGF.getARCCleanupKind(),
+ var.getType(), addr,
+ var.hasAttr<ObjCPreciseLifetimeAttr>());
+ break;
+ }
+ case Qualifiers::OCL_Autoreleasing:
+ // nothing to do
+ break;
+
+ case Qualifiers::OCL_Weak:
+ // __weak objects always get EH cleanups; otherwise, exceptions
+ // could cause really nasty crashes instead of mere leaks.
+ CGF.PushARCWeakReleaseCleanup(NormalAndEHCleanup, var.getType(), addr);
+ break;
+ }
+}
+
+static bool isAccessedBy(const VarDecl &var, const Stmt *s) {
+ if (const Expr *e = dyn_cast<Expr>(s)) {
+ // Skip the most common kinds of expressions that make
+ // hierarchy-walking expensive.
+ s = e = e->IgnoreParenCasts();
+
+ if (const DeclRefExpr *ref = dyn_cast<DeclRefExpr>(e))
+ return (ref->getDecl() == &var);
+ }
+
+ for (Stmt::const_child_range children = s->children(); children; ++children)
+ if (isAccessedBy(var, *children))
+ return true;
+
+ return false;
+}
+
+static bool isAccessedBy(const ValueDecl *decl, const Expr *e) {
+ if (!decl) return false;
+ if (!isa<VarDecl>(decl)) return false;
+ const VarDecl *var = cast<VarDecl>(decl);
+ return isAccessedBy(*var, e);
+}
+
+void CodeGenFunction::EmitScalarInit(const Expr *init,
+ const ValueDecl *D,
+ llvm::Value *addr, bool capturedByInit,
+ bool isVolatile, unsigned alignment,
+ QualType type) {
+ Qualifiers::ObjCLifetime lifetime = type.getQualifiers().getObjCLifetime();
+ if (!lifetime) {
+ llvm::Value *value = EmitScalarExpr(init);
+ if (capturedByInit) addr = BuildBlockByrefAddress(addr, cast<VarDecl>(D));
+ EmitStoreOfScalar(value, addr, isVolatile, alignment, type);
+ return;
+ }
+
+ // If we're emitting a value with lifetime, we have to do the
+ // initialization *before* we leave the cleanup scopes.
+ CodeGenFunction::RunCleanupsScope Scope(*this);
+ if (const ExprWithCleanups *ewc = dyn_cast<ExprWithCleanups>(init))
+ init = ewc->getSubExpr();
+
+ // We have to maintain the illusion that the variable is
+ // zero-initialized. If the variable might be accessed in its
+ // initializer, zero-initialize before running the initializer, then
+ // actually perform the initialization with an assign.
+ bool accessedByInit = false;
+ if (lifetime != Qualifiers::OCL_ExplicitNone)
+ accessedByInit = isAccessedBy(D, init);
+ if (accessedByInit) {
+ // Drill down to the __block object if necessary.
+ llvm::Value *tempAddr = addr;
+ if (capturedByInit) {
+ // We can use a simple GEP for this because it can't have been
+ // moved yet.
+ tempAddr = Builder.CreateStructGEP(tempAddr,
+ getByRefValueLLVMField(cast<VarDecl>(D)));
+ }
+
+ const llvm::PointerType *ty = cast<llvm::PointerType>(tempAddr->getType());
+ ty = cast<llvm::PointerType>(ty->getElementType());
+
+ llvm::Value *zero = llvm::ConstantPointerNull::get(ty);
+
+ // If __weak, we want to use a barrier under certain conditions.
+ if (lifetime == Qualifiers::OCL_Weak)
+ EmitARCInitWeak(tempAddr, zero);
+
+ // Otherwise just do a simple store.
+ else
+ EmitStoreOfScalar(zero, tempAddr, isVolatile, alignment, type);
+ }
+
+ // Emit the initializer.
+ llvm::Value *value = 0;
+
+ switch (lifetime) {
+ case Qualifiers::OCL_None:
+ llvm_unreachable("present but none");
+
+ case Qualifiers::OCL_ExplicitNone:
+ // nothing to do
+ value = EmitScalarExpr(init);
+ break;
+
+ case Qualifiers::OCL_Strong: {
+ value = EmitARCRetainScalarExpr(init);
+ break;
+ }
+
+ case Qualifiers::OCL_Weak: {
+ // No way to optimize a producing initializer into this. It's not
+ // worth optimizing for, because the value will immediately
+ // disappear in the common case.
+ value = EmitScalarExpr(init);
+
+ if (capturedByInit) addr = BuildBlockByrefAddress(addr, cast<VarDecl>(D));
+ if (accessedByInit)
+ EmitARCStoreWeak(addr, value, /*ignored*/ true);
+ else
+ EmitARCInitWeak(addr, value);
+ return;
+ }
+
+ case Qualifiers::OCL_Autoreleasing:
+ value = EmitARCRetainAutoreleaseScalarExpr(init);
+ break;
+ }
+
+ if (capturedByInit) addr = BuildBlockByrefAddress(addr, cast<VarDecl>(D));
+
+ llvm::MDNode *tbaa = CGM.getTBAAInfo(type);
+
+ // If the variable might have been accessed by its initializer, we
+ // might have to initialize with a barrier. We have to do this for
+ // both __weak and __strong, but __weak got filtered out above.
+ if (accessedByInit && lifetime == Qualifiers::OCL_Strong) {
+ llvm::Value *oldValue
+ = EmitLoadOfScalar(addr, isVolatile, alignment, type, tbaa);
+ EmitStoreOfScalar(value, addr, isVolatile, alignment, type, tbaa);
+ EmitARCRelease(oldValue, /*precise*/ false);
+ return;
+ }
+
+ EmitStoreOfScalar(value, addr, isVolatile, alignment, type, tbaa);
+}
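The zero-then-assign dance above exists so that something like
"__strong id x = f(x);" observes a null x rather than garbage. A toy
refcounting model of the accessed-by-init path (all names hypothetical):

    #include <cassert>

    struct Obj { int refcount = 1; };
    void retain(Obj *o)  { if (o) ++o->refcount; }
    void release(Obj *o) { if (o) --o->refcount; }

    // Initialize a __strong slot whose initializer may read the slot
    // itself: pre-null the slot so the variable appears zero-initialized,
    // run the initializer, then store with a release of whatever the
    // initializer may have left behind.
    void initStrongAccessedByInit(Obj **slot, Obj *(*init)(Obj **)) {
      *slot = nullptr;
      Obj *value = init(slot);   // may load from or store to *slot
      retain(value);
      Obj *old = *slot;          // reload: *slot may have changed
      *slot = value;
      release(old);
    }

    int main() {
      static Obj obj;
      Obj *x;                    // uninitialized, as a fresh local would be
      initStrongAccessedByInit(&x, [](Obj **) { return &obj; });
      assert(x == &obj && obj.refcount == 2);
    }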
/// canEmitInitWithFewStoresAfterMemset - Decide whether we can emit the
/// non-zero parts of the specified initializer with equal or fewer than
@@ -521,7 +677,9 @@
// arrays as long as the initialization is trivial (e.g. if they
// have a non-trivial destructor, but not a non-trivial constructor).
if (D.getInit() &&
- (Ty->isArrayType() || Ty->isRecordType()) && Ty->isPODType() &&
+ (Ty->isArrayType() || Ty->isRecordType()) &&
+ (Ty.isPODType(getContext()) ||
+ getContext().getBaseElementType(Ty)->isObjCObjectPointerType()) &&
D.getInit()->isConstantInitializer(getContext(), false)) {
// If the variable's a const type, and it's neither an NRVO
@@ -765,29 +923,30 @@
/// \param capturedByInit true if the variable is a __block variable
/// whose address is potentially changed by the initializer
void CodeGenFunction::EmitExprAsInit(const Expr *init,
- const VarDecl *var,
+ const ValueDecl *D,
llvm::Value *loc,
CharUnits alignment,
bool capturedByInit) {
- QualType type = var->getType();
+ QualType type = D->getType();
bool isVolatile = type.isVolatileQualified();
if (type->isReferenceType()) {
- RValue RV = EmitReferenceBindingToExpr(init, var);
- if (capturedByInit) loc = BuildBlockByrefAddress(loc, var);
+ RValue RV = EmitReferenceBindingToExpr(init, D);
+ if (capturedByInit)
+ loc = BuildBlockByrefAddress(loc, cast<VarDecl>(D));
EmitStoreOfScalar(RV.getScalarVal(), loc, false,
alignment.getQuantity(), type);
} else if (!hasAggregateLLVMType(type)) {
- llvm::Value *V = EmitScalarExpr(init);
- if (capturedByInit) loc = BuildBlockByrefAddress(loc, var);
- EmitStoreOfScalar(V, loc, isVolatile, alignment.getQuantity(), type);
+ EmitScalarInit(init, D, loc, capturedByInit, isVolatile,
+ alignment.getQuantity(), type);
} else if (type->isAnyComplexType()) {
ComplexPairTy complex = EmitComplexExpr(init);
- if (capturedByInit) loc = BuildBlockByrefAddress(loc, var);
+ if (capturedByInit) loc = BuildBlockByrefAddress(loc, cast<VarDecl>(D));
StoreComplexToAddr(complex, loc, isVolatile);
} else {
// TODO: how can we delay here if D is captured by its initializer?
- EmitAggExpr(init, AggValueSlot::forAddr(loc, isVolatile, true, false));
+ EmitAggExpr(init, AggValueSlot::forAddr(loc, type.getQualifiers(), true,
+ false));
}
}
@@ -799,7 +958,7 @@
const VarDecl &D = *emission.Variable;
- // Handle C++ destruction of variables.
+ // Handle C++ or ARC destruction of variables.
if (getLangOptions().CPlusPlus) {
QualType type = D.getType();
QualType baseType = getContext().getBaseElementType(type);
@@ -830,6 +989,12 @@
}
}
+ if (Qualifiers::ObjCLifetime lifetime
+ = D.getType().getQualifiers().getObjCLifetime()) {
+ llvm::Value *loc = emission.getObjectAddress(*this);
+ EmitAutoVarWithLifetime(*this, D, loc, lifetime);
+ }
+
// Handle the cleanup attribute.
if (const CleanupAttr *CA = D.getAttr<CleanupAttr>()) {
const FunctionDecl *FD = CA->getFunctionDecl();
@@ -847,6 +1012,22 @@
enterByrefCleanup(emission);
}
+namespace {
+ /// A cleanup to perform a release of an object at the end of a
+ /// function. This is used to balance out the incoming +1 of a
+ /// ns_consumed argument when we can't reasonably do that just by
+ /// not doing the initial retain for a __block argument.
+ struct ConsumeARCParameter : EHScopeStack::Cleanup {
+ ConsumeARCParameter(llvm::Value *param) : Param(param) {}
+
+ llvm::Value *Param;
+
+ void Emit(CodeGenFunction &CGF, bool IsForEH) {
+ CGF.EmitARCRelease(Param, /*precise*/ false);
+ }
+ };
+}
+
/// Emit an alloca (or GlobalValue depending on target)
/// for the specified parameter and set up LocalDeclMap.
void CodeGenFunction::EmitParmDecl(const VarDecl &D, llvm::Value *Arg,
@@ -883,10 +1064,53 @@
// Otherwise, create a temporary to hold the value.
DeclPtr = CreateMemTemp(Ty, D.getName() + ".addr");
+ bool doStore = true;
+
+ Qualifiers qs = Ty.getQualifiers();
+
+ if (Qualifiers::ObjCLifetime lt = qs.getObjCLifetime()) {
+ // We honor __attribute__((ns_consumed)) for types with lifetime.
+ // For __strong, it's handled by just skipping the initial retain;
+ // otherwise we have to balance out the initial +1 with an extra
+ // cleanup to do the release at the end of the function.
+ bool isConsumed = D.hasAttr<NSConsumedAttr>();
+
+ // 'self' is always formally __strong, but if this is not an
+ // init method then we don't want to retain it.
+ if (lt == Qualifiers::OCL_Strong && qs.hasConst() &&
+ isa<ImplicitParamDecl>(D)) {
+ const ObjCMethodDecl *method = cast<ObjCMethodDecl>(CurCodeDecl);
+ assert(&D == method->getSelfDecl());
+ assert(method->getMethodFamily() != OMF_init);
+ lt = Qualifiers::OCL_ExplicitNone;
+ }
+
+ if (lt == Qualifiers::OCL_Strong) {
+ if (!isConsumed)
+ // Don't use objc_retainBlock for block pointers, because we
+ // don't want to Block_copy something just because we got it
+ // as a parameter.
+ Arg = EmitARCRetainNonBlock(Arg);
+ } else {
+ // Push the cleanup for a consumed parameter.
+ if (isConsumed)
+ EHStack.pushCleanup<ConsumeARCParameter>(getARCCleanupKind(), Arg);
+
+ if (lt == Qualifiers::OCL_Weak) {
+ EmitARCInitWeak(DeclPtr, Arg);
+ doStore = false; // The weak init is a store, no need to do two
+ }
+ }
+
+ // Enter the cleanup scope.
+ EmitAutoVarWithLifetime(*this, D, DeclPtr, lt);
+ }
+
// Store the initial value into the alloca.
- EmitStoreOfScalar(Arg, DeclPtr, Ty.isVolatileQualified(),
- getContext().getDeclAlign(&D).getQuantity(), Ty,
- CGM.getTBAAInfo(Ty));
+ if (doStore)
+ EmitStoreOfScalar(Arg, DeclPtr, Ty.isVolatileQualified(),
+ getContext().getDeclAlign(&D).getQuantity(), Ty,
+ CGM.getTBAAInfo(Ty));
}
llvm::Value *&DMEntry = LocalDeclMap[&D];
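Setting aside the 'self' special case, the parameter plan above is a
three-way decision. A sketch, with isConsumed standing in for
hasAttr<NSConsumedAttr>():

    #include <cassert>

    enum class Lifetime { ExplicitNone, Strong, Weak, Autoreleasing };

    struct ParmPlan {
      bool retainOnEntry;   // EmitARCRetainNonBlock on the incoming value
      bool releaseCleanup;  // ConsumeARCParameter cleanup at function end
      bool weakInit;        // EmitARCInitWeak instead of a raw store
    };

    // Mirror of the decision above for a parameter of the given lifetime.
    ParmPlan planParameter(Lifetime lt, bool isConsumed) {
      ParmPlan p{false, false, false};
      if (lt == Lifetime::Strong)
        p.retainOnEntry = !isConsumed;  // a consumed +1 replaces the retain
      else {
        p.releaseCleanup = isConsumed;  // balance the incoming +1 later
        p.weakInit = (lt == Lifetime::Weak);
      }
      return p;
    }

    int main() {
      assert(planParameter(Lifetime::Strong, false).retainOnEntry);
      assert(!planParameter(Lifetime::Strong, true).retainOnEntry);
      assert(planParameter(Lifetime::Weak, false).weakInit);
    }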
diff --git a/lib/CodeGen/CGDeclCXX.cpp b/lib/CodeGen/CGDeclCXX.cpp
index 178badd..08e8e0c 100644
--- a/lib/CodeGen/CGDeclCXX.cpp
+++ b/lib/CodeGen/CGDeclCXX.cpp
@@ -34,20 +34,22 @@
unsigned Alignment = Context.getDeclAlign(&D).getQuantity();
if (!CGF.hasAggregateLLVMType(T)) {
- llvm::Value *V = CGF.EmitScalarExpr(Init);
CodeGenModule &CGM = CGF.CGM;
Qualifiers::GC GCAttr = CGM.getContext().getObjCGCAttrKind(T);
if (GCAttr == Qualifiers::Strong)
- CGM.getObjCRuntime().EmitObjCGlobalAssign(CGF, V, DeclPtr,
- D.isThreadSpecified());
+ CGM.getObjCRuntime().EmitObjCGlobalAssign(CGF, CGF.EmitScalarExpr(Init),
+ DeclPtr, D.isThreadSpecified());
else if (GCAttr == Qualifiers::Weak)
- CGM.getObjCRuntime().EmitObjCWeakAssign(CGF, V, DeclPtr);
+ CGM.getObjCRuntime().EmitObjCWeakAssign(CGF, CGF.EmitScalarExpr(Init),
+ DeclPtr);
else
- CGF.EmitStoreOfScalar(V, DeclPtr, isVolatile, Alignment, T);
+ CGF.EmitScalarInit(Init, &D, DeclPtr, false, isVolatile, Alignment,
+ D.getType());
} else if (T->isAnyComplexType()) {
CGF.EmitComplexExprIntoAddr(Init, DeclPtr, isVolatile);
} else {
- CGF.EmitAggExpr(Init, AggValueSlot::forAddr(DeclPtr, isVolatile, true));
+ CGF.EmitAggExpr(Init, AggValueSlot::forAddr(DeclPtr, T.getQualifiers(),
+ true));
}
}
@@ -291,10 +293,21 @@
getTypes().getNullaryFunctionInfo(),
FunctionArgList(), SourceLocation());
+ RunCleanupsScope Scope(*this);
+
+ // When building in Objective-C++ ARC mode, create an autorelease pool
+ // around the global initializers.
+ if (getLangOptions().ObjCAutoRefCount && getLangOptions().CPlusPlus) {
+ llvm::Value *token = EmitObjCAutoreleasePoolPush();
+ EmitObjCAutoreleasePoolCleanup(token);
+ }
+
for (unsigned i = 0; i != NumDecls; ++i)
if (Decls[i])
- Builder.CreateCall(Decls[i]);
+ Builder.CreateCall(Decls[i]);
+ Scope.ForceCleanup();
+
FinishFunction();
}
diff --git a/lib/CodeGen/CGException.cpp b/lib/CodeGen/CGException.cpp
index e8ad6da..ce57d02 100644
--- a/lib/CodeGen/CGException.cpp
+++ b/lib/CodeGen/CGException.cpp
@@ -354,7 +354,8 @@
// evaluated but before the exception is caught. But the best way
// to handle that is to teach EmitAggExpr to do the final copy
// differently if it can't be elided.
- CGF.EmitAnyExprToMem(e, typedAddr, /*Volatile*/ false, /*IsInit*/ true);
+ CGF.EmitAnyExprToMem(e, typedAddr, e->getType().getQualifiers(),
+ /*IsInit*/ true);
// Deactivate the cleanup block.
CGF.DeactivateCleanupBlock(cleanup);
@@ -1084,7 +1085,8 @@
CGF.EHStack.pushTerminate();
// Perform the copy construction.
- CGF.EmitAggExpr(copyExpr, AggValueSlot::forAddr(ParamAddr, false, false));
+ CGF.EmitAggExpr(copyExpr, AggValueSlot::forAddr(ParamAddr, Qualifiers(),
+ false));
// Leave the terminate scope.
CGF.EHStack.popTerminate();
diff --git a/lib/CodeGen/CGExpr.cpp b/lib/CodeGen/CGExpr.cpp
index 525ad1b..197bc67 100644
--- a/lib/CodeGen/CGExpr.cpp
+++ b/lib/CodeGen/CGExpr.cpp
@@ -131,12 +131,12 @@
/// location.
void CodeGenFunction::EmitAnyExprToMem(const Expr *E,
llvm::Value *Location,
- bool IsLocationVolatile,
+ Qualifiers Quals,
bool IsInit) {
if (E->getType()->isAnyComplexType())
- EmitComplexExprIntoAddr(E, Location, IsLocationVolatile);
+ EmitComplexExprIntoAddr(E, Location, Quals.hasVolatile());
else if (hasAggregateLLVMType(E->getType()))
- EmitAggExpr(E, AggValueSlot::forAddr(Location, IsLocationVolatile, IsInit));
+ EmitAggExpr(E, AggValueSlot::forAddr(Location, Quals, IsInit));
else {
RValue RV = RValue::get(EmitScalarExpr(E, /*Ignore*/ false));
LValue LV = MakeAddrLValue(Location, E->getType());
@@ -203,7 +203,10 @@
EmitExprForReferenceBinding(CodeGenFunction &CGF, const Expr *E,
llvm::Value *&ReferenceTemporary,
const CXXDestructorDecl *&ReferenceTemporaryDtor,
+ QualType &ObjCARCReferenceLifetimeType,
const NamedDecl *InitializedDecl) {
+ ObjCARCReferenceLifetimeType = QualType();
+
if (const CXXDefaultArgExpr *DAE = dyn_cast<CXXDefaultArgExpr>(E))
E = DAE->getExpr();
@@ -213,6 +216,7 @@
return EmitExprForReferenceBinding(CGF, TE->getSubExpr(),
ReferenceTemporary,
ReferenceTemporaryDtor,
+ ObjCARCReferenceLifetimeType,
InitializedDecl);
}
@@ -279,12 +283,10 @@
!E->getType()->isAnyComplexType()) {
ReferenceTemporary = CreateReferenceTemporary(CGF, E->getType(),
InitializedDecl);
- AggSlot = AggValueSlot::forAddr(ReferenceTemporary, false,
+ AggSlot = AggValueSlot::forAddr(ReferenceTemporary, Qualifiers(),
InitializedDecl != 0);
}
-
- RV = CGF.EmitAnyExpr(E, AggSlot);
-
+
if (InitializedDecl) {
// Get the destructor for the reference temporary.
if (const RecordType *RT = E->getType()->getAs<RecordType>()) {
@@ -292,8 +294,37 @@
if (!ClassDecl->hasTrivialDestructor())
ReferenceTemporaryDtor = ClassDecl->getDestructor();
}
+ else if (CGF.getContext().getLangOptions().ObjCAutoRefCount) {
+ if (const ValueDecl *InitVD = dyn_cast<ValueDecl>(InitializedDecl)) {
+ if (const ReferenceType *RefType
+ = InitVD->getType()->getAs<ReferenceType>()) {
+ QualType PointeeType = RefType->getPointeeType();
+ if (PointeeType->isObjCLifetimeType() &&
+ PointeeType.getObjCLifetime() != Qualifiers::OCL_ExplicitNone) {
+ // Objective-C++ ARC: We're binding a reference to
+ // lifetime-qualified type to a temporary, so we need to extend
+ // the lifetime of the temporary with appropriate retain/release/
+ // autorelease calls.
+ ObjCARCReferenceLifetimeType = PointeeType;
+
+ // Create a temporary variable that we can bind the reference to.
+ ReferenceTemporary = CreateReferenceTemporary(CGF, PointeeType,
+ InitializedDecl);
+
+ unsigned Alignment =
+ CGF.getContext().getTypeAlignInChars(PointeeType).getQuantity();
+ CGF.EmitScalarInit(E, InitVD, ReferenceTemporary, false,
+ PointeeType.isVolatileQualified(),
+ Alignment, PointeeType);
+ return ReferenceTemporary;
+ }
+ }
+ }
+ }
}
+ RV = CGF.EmitAnyExpr(E, AggSlot);
+
// Check if need to perform derived-to-base casts and/or field accesses, to
// get from the temporary object we created (and, potentially, for which we
// extended the lifetime) to the subobject we're binding the reference to.
@@ -361,26 +392,60 @@
const NamedDecl *InitializedDecl) {
llvm::Value *ReferenceTemporary = 0;
const CXXDestructorDecl *ReferenceTemporaryDtor = 0;
+ QualType ObjCARCReferenceLifetimeType;
llvm::Value *Value = EmitExprForReferenceBinding(*this, E, ReferenceTemporary,
ReferenceTemporaryDtor,
+ ObjCARCReferenceLifetimeType,
InitializedDecl);
- if (!ReferenceTemporaryDtor)
+ if (!ReferenceTemporaryDtor && ObjCARCReferenceLifetimeType.isNull())
return RValue::get(Value);
// Make sure to call the destructor for the reference temporary.
- if (const VarDecl *VD = dyn_cast_or_null<VarDecl>(InitializedDecl)) {
- if (VD->hasGlobalStorage()) {
+ const VarDecl *VD = dyn_cast_or_null<VarDecl>(InitializedDecl);
+ if (VD && VD->hasGlobalStorage()) {
+ if (ReferenceTemporaryDtor) {
llvm::Constant *DtorFn =
CGM.GetAddrOfCXXDestructor(ReferenceTemporaryDtor, Dtor_Complete);
EmitCXXGlobalDtorRegistration(DtorFn,
cast<llvm::Constant>(ReferenceTemporary));
-
- return RValue::get(Value);
+ } else {
+ assert(!ObjCARCReferenceLifetimeType.isNull());
+ // Note: We intentionally do not register a global "destructor" to
+ // release the object.
}
+
+ return RValue::get(Value);
}
- PushDestructorCleanup(ReferenceTemporaryDtor, ReferenceTemporary);
-
+ if (ReferenceTemporaryDtor)
+ PushDestructorCleanup(ReferenceTemporaryDtor, ReferenceTemporary);
+ else {
+ switch (ObjCARCReferenceLifetimeType.getObjCLifetime()) {
+ case Qualifiers::OCL_None:
+ llvm_unreachable("Not a reference temporary that needs to be deallocated");
+ break;
+
+ case Qualifiers::OCL_ExplicitNone:
+ case Qualifiers::OCL_Autoreleasing:
+ // Nothing to do.
+ break;
+
+ case Qualifiers::OCL_Strong:
+ PushARCReleaseCleanup(getARCCleanupKind(), ObjCARCReferenceLifetimeType,
+ ReferenceTemporary,
+ VD && VD->hasAttr<ObjCPreciseLifetimeAttr>());
+ break;
+
+ case Qualifiers::OCL_Weak:
+ // __weak objects always get EH cleanups; otherwise, exceptions
+ // could cause really nasty crashes instead of mere leaks.
+ PushARCWeakReleaseCleanup(NormalAndEHCleanup,
+ ObjCARCReferenceLifetimeType,
+ ReferenceTemporary);
+ break;
+ }
+ }
+
return RValue::get(Value);
}
@@ -599,6 +664,7 @@
case Expr::CXXDynamicCastExprClass:
case Expr::CXXReinterpretCastExprClass:
case Expr::CXXConstCastExprClass:
+ case Expr::ObjCBridgedCastExprClass:
return EmitCastLValue(cast<CastExpr>(E));
}
}
@@ -668,6 +734,8 @@
return RValue::get(CGM.getObjCRuntime().EmitObjCWeakRead(*this,
AddrWeakObj));
}
+ if (LV.getQuals().getObjCLifetime() == Qualifiers::OCL_Weak)
+ return RValue::get(EmitARCLoadWeak(LV.getAddress()));
if (LV.isSimple()) {
llvm::Value *Ptr = LV.getAddress();
@@ -838,6 +906,31 @@
return EmitStoreThroughPropertyRefLValue(Src, Dst);
}
+ // There's special magic for assigning into an ARC-qualified l-value.
+ if (Qualifiers::ObjCLifetime Lifetime = Dst.getQuals().getObjCLifetime()) {
+ switch (Lifetime) {
+ case Qualifiers::OCL_None:
+ llvm_unreachable("present but none");
+
+ case Qualifiers::OCL_ExplicitNone:
+ // nothing special
+ break;
+
+ case Qualifiers::OCL_Strong:
+ EmitARCStoreStrong(Dst, Ty, Src.getScalarVal(), /*ignore*/ true);
+ return;
+
+ case Qualifiers::OCL_Weak:
+ EmitARCStoreWeak(Dst.getAddress(), Src.getScalarVal(), /*ignore*/ true);
+ return;
+
+ case Qualifiers::OCL_Autoreleasing:
+ Src = RValue::get(EmitObjCExtendObjectLifetime(Ty, Src.getScalarVal()));
+ // fall into the normal path
+ break;
+ }
+ }
+
if (Dst.isObjCWeak() && !Dst.isNonGC()) {
// load of a __weak object.
llvm::Value *LvalueDst = Dst.getAddress();
@@ -1113,7 +1206,12 @@
setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV);
return;
}
-
+
+ if (const ObjCBridgedCastExpr *Exp = dyn_cast<ObjCBridgedCastExpr>(E)) {
+ setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV);
+ return;
+ }
+
if (const ArraySubscriptExpr *Exp = dyn_cast<ArraySubscriptExpr>(E)) {
setObjCGCLValueClass(Ctx, Exp->getBase(), LV);
if (LV.isObjCIvar() && !LV.isObjCArray())
@@ -1734,7 +1832,8 @@
const Expr *InitExpr = E->getInitializer();
LValue Result = MakeAddrLValue(DeclPtr, E->getType());
- EmitAnyExprToMem(InitExpr, DeclPtr, /*Volatile*/ false, /*Init*/ true);
+ EmitAnyExprToMem(InitExpr, DeclPtr, E->getType().getQualifiers(),
+ /*Init*/ true);
return Result;
}
@@ -1863,13 +1962,15 @@
case CK_DerivedToBaseMemberPointer:
case CK_BaseToDerivedMemberPointer:
case CK_MemberPointerToBoolean:
- case CK_AnyPointerToBlockPointerCast: {
+ case CK_AnyPointerToBlockPointerCast:
+ case CK_ObjCProduceObject:
+ case CK_ObjCConsumeObject: {
// These casts only produce lvalues when we're binding a reference to a
// temporary realized from a (converted) pure rvalue. Emit the expression
// as a value, copy it into a temporary, and return an lvalue referring to
// that temporary.
llvm::Value *V = CreateMemTemp(E->getType(), "ref.temp");
- EmitAnyExprToMem(E, V, false, false);
+ EmitAnyExprToMem(E, V, E->getType().getQualifiers(), false);
return MakeAddrLValue(V, E->getType());
}
@@ -1988,13 +2089,60 @@
if (const CXXMethodDecl *MD = dyn_cast_or_null<CXXMethodDecl>(TargetDecl))
return EmitCXXOperatorMemberCallExpr(CE, MD, ReturnValue);
- if (isa<CXXPseudoDestructorExpr>(E->getCallee()->IgnoreParens())) {
- // C++ [expr.pseudo]p1:
- // The result shall only be used as the operand for the function call
- // operator (), and the result of such a call has type void. The only
- // effect is the evaluation of the postfix-expression before the dot or
- // arrow.
- EmitScalarExpr(E->getCallee());
+ if (const CXXPseudoDestructorExpr *PseudoDtor
+ = dyn_cast<CXXPseudoDestructorExpr>(E->getCallee()->IgnoreParens())) {
+ QualType DestroyedType = PseudoDtor->getDestroyedType();
+ if (getContext().getLangOptions().ObjCAutoRefCount &&
+ DestroyedType->isObjCLifetimeType() &&
+ (DestroyedType.getObjCLifetime() == Qualifiers::OCL_Strong ||
+ DestroyedType.getObjCLifetime() == Qualifiers::OCL_Weak)) {
+ // Automatic Reference Counting:
+ // If the pseudo-expression names a retainable object with weak or strong
+ // lifetime, the object shall be released.
+ bool isNonGC = false;
+ Expr *BaseExpr = PseudoDtor->getBase();
+ llvm::Value *BaseValue = NULL;
+ Qualifiers BaseQuals;
+
+ // If this is s.x, emit s as an lvalue. If it is s->x, emit s as a scalar.
+ if (PseudoDtor->isArrow()) {
+ BaseValue = EmitScalarExpr(BaseExpr);
+ const PointerType *PTy = BaseExpr->getType()->getAs<PointerType>();
+ BaseQuals = PTy->getPointeeType().getQualifiers();
+ } else {
+ LValue BaseLV = EmitLValue(BaseExpr);
+ if (BaseLV.isNonGC())
+ isNonGC = true;
+ BaseValue = BaseLV.getAddress();
+ QualType BaseTy = BaseExpr->getType();
+ BaseQuals = BaseTy.getQualifiers();
+ }
+
+ switch (PseudoDtor->getDestroyedType().getObjCLifetime()) {
+ case Qualifiers::OCL_None:
+ case Qualifiers::OCL_ExplicitNone:
+ case Qualifiers::OCL_Autoreleasing:
+ break;
+
+ case Qualifiers::OCL_Strong:
+ EmitARCRelease(Builder.CreateLoad(BaseValue,
+ PseudoDtor->getDestroyedType().isVolatileQualified()),
+ /*precise*/ true);
+ break;
+
+ case Qualifiers::OCL_Weak:
+ EmitARCDestroyWeak(BaseValue);
+ break;
+ }
+ } else {
+ // C++ [expr.pseudo]p1:
+ // The result shall only be used as the operand for the function call
+ // operator (), and the result of such a call has type void. The only
+ // effect is the evaluation of the postfix-expression before the dot or
+ // arrow.
+ EmitScalarExpr(E->getCallee());
+ }
+
return RValue::get(0);
}
@@ -2016,9 +2164,25 @@
return EmitPointerToDataMemberBinaryExpr(E);
assert(E->getOpcode() == BO_Assign && "unexpected binary l-value");
+
+ // Note that in all of these cases, __block variables need the RHS
+ // evaluated first just in case the variable gets moved by the RHS.
if (!hasAggregateLLVMType(E->getType())) {
- // __block variables need the RHS evaluated first.
+ switch (E->getLHS()->getType().getObjCLifetime()) {
+ case Qualifiers::OCL_Strong:
+ return EmitARCStoreStrong(E, /*ignored*/ false).first;
+
+ case Qualifiers::OCL_Autoreleasing:
+ return EmitARCStoreAutoreleasing(E).first;
+
+ // No reason to do any of these differently.
+ case Qualifiers::OCL_None:
+ case Qualifiers::OCL_ExplicitNone:
+ case Qualifiers::OCL_Weak:
+ break;
+ }
+
RValue RV = EmitAnyExpr(E->getRHS());
LValue LV = EmitLValue(E->getLHS());
EmitStoreThroughLValue(RV, LV, E->getType());
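For the __strong assignment path, the key property is ordering: retain
the new value first, release the old value last, so self-assignment stays
balanced. A toy model (hypothetical helpers, not the real
EmitARCStoreStrong):

    #include <cassert>

    struct Obj { int refcount = 1; };
    void retain(Obj *o)  { if (o) ++o->refcount; }
    void release(Obj *o) { if (o) --o->refcount; }

    // Assumed shape of a strong store: retain the new value, swap it into
    // the slot, and release the old value last so that (p = p) never
    // drops the object to zero in between.
    void storeStrong(Obj **slot, Obj *value) {
      retain(value);
      Obj *old = *slot;
      *slot = value;
      release(old);
    }

    int main() {
      static Obj a;
      Obj *p = &a;               // conceptually holds a +1 on a
      storeStrong(&p, p);        // self-assignment stays balanced
      assert(p == &a && a.refcount == 1);
    }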
diff --git a/lib/CodeGen/CGExprAgg.cpp b/lib/CodeGen/CGExprAgg.cpp
index d8da642..6d34499 100644
--- a/lib/CodeGen/CGExprAgg.cpp
+++ b/lib/CodeGen/CGExprAgg.cpp
@@ -339,6 +339,8 @@
case CK_IntegralComplexToBoolean:
case CK_IntegralComplexCast:
case CK_IntegralComplexToFloatingComplex:
+ case CK_ObjCProduceObject:
+ case CK_ObjCConsumeObject:
llvm_unreachable("cast kind invalid for aggregate types");
}
}
@@ -570,8 +572,13 @@
} else if (T->isAnyComplexType()) {
CGF.EmitComplexExprIntoAddr(E, LV.getAddress(), false);
} else if (CGF.hasAggregateLLVMType(T)) {
- CGF.EmitAggExpr(E, AggValueSlot::forAddr(LV.getAddress(), false, true,
+ CGF.EmitAggExpr(E, AggValueSlot::forAddr(LV.getAddress(),
+ T.getQualifiers(), true,
false, Dest.isZeroed()));
+ } else if (LV.isSimple()) {
+ CGF.EmitScalarInit(E, /*D=*/0, LV.getAddress(), /*Captured=*/false,
+ LV.isVolatileQualified(), LV.getAlignment(),
+ T);
} else {
CGF.EmitStoreThroughLValue(RValue::get(CGF.EmitScalarExpr(E)), LV, T);
}
@@ -636,6 +643,8 @@
uint64_t NumArrayElements = AType->getNumElements();
QualType ElementType = CGF.getContext().getCanonicalType(E->getType());
ElementType = CGF.getContext().getAsArrayType(ElementType)->getElementType();
+ ElementType = CGF.getContext().getQualifiedType(ElementType,
+ Dest.getQualifiers());
bool hasNonTrivialCXXConstructor = false;
if (CGF.getContext().getLangOptions().CPlusPlus)
@@ -645,8 +654,6 @@
hasNonTrivialCXXConstructor = !RD->hasTrivialDefaultConstructor();
}
- // FIXME: were we intentionally ignoring address spaces and GC attributes?
-
for (uint64_t i = 0; i != NumArrayElements; ++i) {
// If we're done emitting initializers and the destination is known-zeroed
// then we're done.
@@ -873,8 +880,6 @@
///
/// \param IsInitializer - true if this evaluation is initializing an
/// object whose lifetime is already being managed.
-//
-// FIXME: Take Qualifiers object.
void CodeGenFunction::EmitAggExpr(const Expr *E, AggValueSlot Slot,
bool IgnoreResult) {
assert(E && hasAggregateLLVMType(E->getType()) &&
@@ -892,7 +897,7 @@
assert(hasAggregateLLVMType(E->getType()) && "Invalid argument!");
llvm::Value *Temp = CreateMemTemp(E->getType());
LValue LV = MakeAddrLValue(Temp, E->getType());
- EmitAggExpr(E, AggValueSlot::forAddr(Temp, LV.isVolatileQualified(), false));
+ EmitAggExpr(E, AggValueSlot::forLValue(LV, false));
return LV;
}
@@ -954,7 +959,10 @@
llvm::Type::getInt8PtrTy(getLLVMContext(), SPT->getAddressSpace());
SrcPtr = Builder.CreateBitCast(SrcPtr, SBP, "tmp");
- if (const RecordType *RecordTy = Ty->getAs<RecordType>()) {
+ // Don't do any of the memmove_collectable tests if GC isn't set.
+ if (CGM.getLangOptions().getGCMode() == LangOptions::NonGC) {
+ // fall through
+ } else if (const RecordType *RecordTy = Ty->getAs<RecordType>()) {
RecordDecl *Record = RecordTy->getDecl();
if (Record->hasObjectMember()) {
CharUnits size = TypeInfo.first;
@@ -964,7 +972,7 @@
SizeVal);
return;
}
- } else if (getContext().getAsArrayType(Ty)) {
+ } else if (Ty->isArrayType()) {
QualType BaseType = getContext().getBaseElementType(Ty);
if (const RecordType *RecordTy = BaseType->getAs<RecordType>()) {
if (RecordTy->getDecl()->hasObjectMember()) {
diff --git a/lib/CodeGen/CGExprCXX.cpp b/lib/CodeGen/CGExprCXX.cpp
index 81fee67..434ca1b 100644
--- a/lib/CodeGen/CGExprCXX.cpp
+++ b/lib/CodeGen/CGExprCXX.cpp
@@ -708,15 +708,14 @@
unsigned Alignment =
CGF.getContext().getTypeAlignInChars(AllocType).getQuantity();
if (!CGF.hasAggregateLLVMType(AllocType))
- CGF.EmitStoreOfScalar(CGF.EmitScalarExpr(Init), NewPtr,
- AllocType.isVolatileQualified(), Alignment,
- AllocType);
+ CGF.EmitScalarInit(Init, 0, NewPtr, false, AllocType.isVolatileQualified(),
+ Alignment, AllocType);
else if (AllocType->isAnyComplexType())
CGF.EmitComplexExprIntoAddr(Init, NewPtr,
AllocType.isVolatileQualified());
else {
AggValueSlot Slot
- = AggValueSlot::forAddr(NewPtr, AllocType.isVolatileQualified(), true);
+ = AggValueSlot::forAddr(NewPtr, AllocType.getQualifiers(), true);
CGF.EmitAggExpr(Init, Slot);
}
}
@@ -1075,7 +1074,7 @@
// CXXNewExpr::shouldNullCheckAllocation()) and we have an
// interesting initializer.
bool nullCheck = allocatorType->isNothrow(getContext()) &&
- !(allocType->isPODType() && !E->hasInitializer());
+ !(allocType.isPODType(getContext()) && !E->hasInitializer());
llvm::BasicBlock *nullCheckBB = 0;
llvm::BasicBlock *contBB = 0;
@@ -1247,7 +1246,29 @@
if (Dtor)
CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete,
/*ForVirtualBase=*/false, Ptr);
+ else if (CGF.getLangOptions().ObjCAutoRefCount &&
+ ElementType->isObjCLifetimeType()) {
+ switch (ElementType.getObjCLifetime()) {
+ case Qualifiers::OCL_None:
+ case Qualifiers::OCL_ExplicitNone:
+ case Qualifiers::OCL_Autoreleasing:
+ break;
+ case Qualifiers::OCL_Strong: {
+ // Load the pointer value.
+ llvm::Value *PtrValue = CGF.Builder.CreateLoad(Ptr,
+ ElementType.isVolatileQualified());
+
+ CGF.EmitARCRelease(PtrValue, /*precise*/ true);
+ break;
+ }
+
+ case Qualifiers::OCL_Weak:
+ CGF.EmitARCDestroyWeak(Ptr);
+ break;
+ }
+ }
+
CGF.PopCleanupBlock();
}
@@ -1339,6 +1360,65 @@
" for a class with destructor");
CGF.EmitCXXAggrDestructorCall(RD->getDestructor(), NumElements, Ptr);
}
+ } else if (CGF.getLangOptions().ObjCAutoRefCount &&
+ ElementType->isObjCLifetimeType() &&
+ (ElementType.getObjCLifetime() == Qualifiers::OCL_Strong ||
+ ElementType.getObjCLifetime() == Qualifiers::OCL_Weak)) {
+ bool IsStrong = ElementType.getObjCLifetime() == Qualifiers::OCL_Strong;
+ const llvm::Type *SizeLTy = CGF.ConvertType(CGF.getContext().getSizeType());
+ llvm::Value *One = llvm::ConstantInt::get(SizeLTy, 1);
+
+ // Create a temporary for the loop index and initialize it with the
+ // count of array elements.
+ llvm::Value *IndexPtr = CGF.CreateTempAlloca(SizeLTy, "loop.index");
+
+ // Store the number of elements in the index pointer.
+ CGF.Builder.CreateStore(NumElements, IndexPtr);
+
+ // Start the loop with a block that tests the condition.
+ llvm::BasicBlock *CondBlock = CGF.createBasicBlock("for.cond");
+ llvm::BasicBlock *AfterFor = CGF.createBasicBlock("for.end");
+
+ CGF.EmitBlock(CondBlock);
+
+ llvm::BasicBlock *ForBody = CGF.createBasicBlock("for.body");
+
+ // Generate: if (loop-index != 0) fall to the loop body,
+ // otherwise, go to the block after the for-loop.
+ llvm::Value* zeroConstant = llvm::Constant::getNullValue(SizeLTy);
+ llvm::Value *Counter = CGF.Builder.CreateLoad(IndexPtr);
+ llvm::Value *IsNE = CGF.Builder.CreateICmpNE(Counter, zeroConstant,
+ "isne");
+ // If the condition is true, execute the body.
+ CGF.Builder.CreateCondBr(IsNE, ForBody, AfterFor);
+
+ CGF.EmitBlock(ForBody);
+
+ llvm::BasicBlock *ContinueBlock = CGF.createBasicBlock("for.inc");
+ // Inside the loop body, emit the release of the array element.
+ Counter = CGF.Builder.CreateLoad(IndexPtr);
+ Counter = CGF.Builder.CreateSub(Counter, One);
+ llvm::Value *Address = CGF.Builder.CreateInBoundsGEP(Ptr, Counter,
+ "arrayidx");
+ if (IsStrong)
+ CGF.EmitARCRelease(CGF.Builder.CreateLoad(Address,
+ ElementType.isVolatileQualified()),
+ /*precise*/ true);
+ else
+ CGF.EmitARCDestroyWeak(Address);
+
+ CGF.EmitBlock(ContinueBlock);
+
+ // Emit the decrement of the loop counter.
+ Counter = CGF.Builder.CreateLoad(IndexPtr);
+ Counter = CGF.Builder.CreateSub(Counter, One, "dec");
+ CGF.Builder.CreateStore(Counter, IndexPtr);
+
+ // Finally, branch back up to the condition for the next iteration.
+ CGF.EmitBranch(CondBlock);
+
+ // Emit the fall-through block.
+ CGF.EmitBlock(AfterFor, true);
}
CGF.PopCleanupBlock();
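A hedged sketch of the constructs the array-delete loop above tears down
(mirroring the shape of the ARC new/delete tests; names illustrative):

    void test_array_delete(__strong id *sptr, __weak id *wptr) {
      delete [] sptr;   // loop releasing each element, back to front
      delete [] wptr;   // loop calling objc_destroyWeak on each element
    }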
diff --git a/lib/CodeGen/CGExprConstant.cpp b/lib/CodeGen/CGExprConstant.cpp
index da37bd5..75e5661 100644
--- a/lib/CodeGen/CGExprConstant.cpp
+++ b/lib/CodeGen/CGExprConstant.cpp
@@ -570,6 +570,8 @@
case CK_GetObjCProperty:
case CK_ToVoid:
case CK_Dynamic:
+ case CK_ObjCProduceObject:
+ case CK_ObjCConsumeObject:
return 0;
// These might need to be supported for constexpr.
diff --git a/lib/CodeGen/CGExprScalar.cpp b/lib/CodeGen/CGExprScalar.cpp
index dff7bf4..6e42da8 100644
--- a/lib/CodeGen/CGExprScalar.cpp
+++ b/lib/CodeGen/CGExprScalar.cpp
@@ -1106,7 +1106,12 @@
// function pointers on Itanium and ARM).
return CGF.CGM.getCXXABI().EmitMemberPointerConversion(CGF, CE, Src);
}
-
+
+ case CK_ObjCProduceObject:
+ return CGF.EmitARCRetainScalarExpr(E);
+ case CK_ObjCConsumeObject:
+ return CGF.EmitObjCConsumeObject(E->getType(), Visit(E));
+
case CK_FloatingRealToComplex:
case CK_FloatingComplexCast:
case CK_IntegralRealToComplex:
@@ -2228,20 +2233,42 @@
Value *ScalarExprEmitter::VisitBinAssign(const BinaryOperator *E) {
bool Ignore = TestAndClearIgnoreResultAssign();
- // __block variables need to have the rhs evaluated first, plus this should
- // improve codegen just a little.
- Value *RHS = Visit(E->getRHS());
- LValue LHS = EmitCheckedLValue(E->getLHS());
+ Value *RHS;
+ LValue LHS;
- // Store the value into the LHS. Bit-fields are handled specially
- // because the result is altered by the store, i.e., [C99 6.5.16p1]
- // 'An assignment expression has the value of the left operand after
- // the assignment...'.
- if (LHS.isBitField())
- CGF.EmitStoreThroughBitfieldLValue(RValue::get(RHS), LHS, E->getType(),
- &RHS);
- else
- CGF.EmitStoreThroughLValue(RValue::get(RHS), LHS, E->getType());
+ switch (E->getLHS()->getType().getObjCLifetime()) {
+ case Qualifiers::OCL_Strong:
+ llvm::tie(LHS, RHS) = CGF.EmitARCStoreStrong(E, Ignore);
+ break;
+
+ case Qualifiers::OCL_Autoreleasing:
+ llvm::tie(LHS,RHS) = CGF.EmitARCStoreAutoreleasing(E);
+ break;
+
+ case Qualifiers::OCL_Weak:
+ RHS = Visit(E->getRHS());
+ LHS = EmitCheckedLValue(E->getLHS());
+ RHS = CGF.EmitARCStoreWeak(LHS.getAddress(), RHS, Ignore);
+ break;
+
+ // No reason to do any of these differently.
+ case Qualifiers::OCL_None:
+ case Qualifiers::OCL_ExplicitNone:
+ // __block variables need to have the rhs evaluated first, plus
+ // this should improve codegen just a little.
+ RHS = Visit(E->getRHS());
+ LHS = EmitCheckedLValue(E->getLHS());
+
+ // Store the value into the LHS. Bit-fields are handled specially
+ // because the result is altered by the store, i.e., [C99 6.5.16p1]
+ // 'An assignment expression has the value of the left operand after
+ // the assignment...'.
+ if (LHS.isBitField())
+ CGF.EmitStoreThroughBitfieldLValue(RValue::get(RHS), LHS, E->getType(),
+ &RHS);
+ else
+ CGF.EmitStoreThroughLValue(RValue::get(RHS), LHS, E->getType());
+ }
// If the result is clearly ignored, return now.
if (Ignore)
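Putting the two assignment switches together, a hedged sketch of the stores
they now route (ARC mode assumed):

    void assignments(id v) {
      __strong id s;         s = v;  // retain v, store it, release the old
                                     // value (fused objc_storeStrong at -O0)
      __weak id w;           w = v;  // objc_storeWeak(&w, v)
      __autoreleasing id a;  a = v;  // plain store of objc_retainAutorelease(v)
    }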
diff --git a/lib/CodeGen/CGObjC.cpp b/lib/CodeGen/CGObjC.cpp
index cdb15bf..cdc2fff 100644
--- a/lib/CodeGen/CGObjC.cpp
+++ b/lib/CodeGen/CGObjC.cpp
@@ -15,15 +15,29 @@
#include "CGObjCRuntime.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
+#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/StmtObjC.h"
#include "clang/Basic/Diagnostic.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Target/TargetData.h"
+#include "llvm/InlineAsm.h"
using namespace clang;
using namespace CodeGen;
+typedef llvm::PointerIntPair<llvm::Value*,1,bool> TryEmitResult;
+static TryEmitResult
+tryEmitARCRetainScalarExpr(CodeGenFunction &CGF, const Expr *e);
+
+/// Given the address of a variable of pointer type, find the correct
+/// null to store into it.
+static llvm::Constant *getNullForVariable(llvm::Value *addr) {
+ const llvm::Type *type =
+ cast<llvm::PointerType>(addr->getType())->getElementType();
+ return llvm::ConstantPointerNull::get(cast<llvm::PointerType>(type));
+}
+
/// Emits an instance of NSConstantString representing the object.
llvm::Value *CodeGenFunction::EmitObjCStringLiteral(const ObjCStringLiteral *E)
{
@@ -55,6 +69,7 @@
RValue Result) {
if (!Method)
return Result;
+
if (!Method->hasRelatedResultType() ||
CGF.getContext().hasSameType(E->getType(), Method->getResultType()) ||
!Result.isScalar())
@@ -71,6 +86,18 @@
// implementation vary between runtimes. We can get the receiver and
// arguments in generic code.
+ bool isDelegateInit = E->isDelegateInitCall();
+
+ // We don't retain the receiver in delegate init calls, and this is
+ // safe because the receiver value is always loaded from 'self',
+ // which we zero out. We don't want to Block_copy block receivers,
+ // though.
+ bool retainSelf =
+ (!isDelegateInit &&
+ CGM.getLangOptions().ObjCAutoRefCount &&
+ E->getMethodDecl() &&
+ E->getMethodDecl()->hasAttr<NSConsumesSelfAttr>());
+
CGObjCRuntime &Runtime = CGM.getObjCRuntime();
bool isSuperMessage = false;
bool isClassMessage = false;
@@ -80,8 +107,15 @@
llvm::Value *Receiver = 0;
switch (E->getReceiverKind()) {
case ObjCMessageExpr::Instance:
- Receiver = EmitScalarExpr(E->getInstanceReceiver());
ReceiverType = E->getInstanceReceiver()->getType();
+ if (retainSelf) {
+ TryEmitResult ter = tryEmitARCRetainScalarExpr(*this,
+ E->getInstanceReceiver());
+ Receiver = ter.getPointer();
+ if (!ter.getInt())
+ Receiver = EmitARCRetainNonBlock(Receiver);
+ } else
+ Receiver = EmitScalarExpr(E->getInstanceReceiver());
break;
case ObjCMessageExpr::Class: {
@@ -92,6 +126,9 @@
assert(OID && "Invalid Objective-C class message send");
Receiver = Runtime.GetClass(Builder, OID);
isClassMessage = true;
+
+ if (retainSelf)
+ Receiver = EmitARCRetainNonBlock(Receiver);
break;
}
@@ -99,6 +136,9 @@
ReceiverType = E->getSuperType();
Receiver = LoadObjCSelf();
isSuperMessage = true;
+
+ if (retainSelf)
+ Receiver = EmitARCRetainNonBlock(Receiver);
break;
case ObjCMessageExpr::SuperClass:
@@ -106,14 +146,36 @@
Receiver = LoadObjCSelf();
isSuperMessage = true;
isClassMessage = true;
+
+ if (retainSelf)
+ Receiver = EmitARCRetainNonBlock(Receiver);
break;
}
+ QualType ResultType =
+ E->getMethodDecl() ? E->getMethodDecl()->getResultType() : E->getType();
+
CallArgList Args;
EmitCallArgs(Args, E->getMethodDecl(), E->arg_begin(), E->arg_end());
- QualType ResultType =
- E->getMethodDecl() ? E->getMethodDecl()->getResultType() : E->getType();
+ // For delegate init calls in ARC, do an unsafe store of null into
+ // self. This represents the call taking direct ownership of that
+ // value. We have to do this after emitting the other call
+ // arguments because they might also reference self, but we don't
+ // have to worry about any of them modifying self because that would
+ // be an undefined read and write of an object in unordered
+ // expressions.
+ if (isDelegateInit) {
+ assert(getLangOptions().ObjCAutoRefCount &&
+ "delegate init calls should only be marked in ARC");
+
+ // Do an unsafe store of null into self.
+ llvm::Value *selfAddr =
+ LocalDeclMap[cast<ObjCMethodDecl>(CurCodeDecl)->getSelfDecl()];
+ assert(selfAddr && "no self entry for a delegate init call?");
+
+ Builder.CreateStore(getNullForVariable(selfAddr), selfAddr);
+ }
RValue result;
if (isSuperMessage) {
@@ -134,10 +196,52 @@
Receiver, Args, OID,
E->getMethodDecl());
}
-
+
+ // For delegate init calls in ARC, implicitly store the result of
+ // the call back into self. This takes ownership of the value.
+ if (isDelegateInit) {
+ llvm::Value *selfAddr =
+ LocalDeclMap[cast<ObjCMethodDecl>(CurCodeDecl)->getSelfDecl()];
+ llvm::Value *newSelf = result.getScalarVal();
+
+ // The delegate return type isn't necessarily a matching type; in
+ // fact, it's quite likely to be 'id'.
+ const llvm::Type *selfTy =
+ cast<llvm::PointerType>(selfAddr->getType())->getElementType();
+ newSelf = Builder.CreateBitCast(newSelf, selfTy);
+
+ Builder.CreateStore(newSelf, selfAddr);
+ }
+
return AdjustRelatedResultType(*this, E, E->getMethodDecl(), result);
}
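A hedged sketch of the delegate init pattern the two blocks above implement
(a method-body fragment, not a complete translation unit):

    - (id)init {
      // ARC stores null into 'self' just before the send (the call
      // consumes self) and then stores the result back, bitcast to
      // self's type.
      self = [super init];
      return self;
    }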
+namespace {
+struct FinishARCDealloc : EHScopeStack::Cleanup {
+ void Emit(CodeGenFunction &CGF, bool isForEH) {
+ const ObjCMethodDecl *method = cast<ObjCMethodDecl>(CGF.CurCodeDecl);
+ const ObjCImplementationDecl *impl
+ = cast<ObjCImplementationDecl>(method->getDeclContext());
+ const ObjCInterfaceDecl *iface = impl->getClassInterface();
+ if (!iface->getSuperClass()) return;
+
+ // Call [super dealloc] if we have a superclass.
+ llvm::Value *self = CGF.LoadObjCSelf();
+
+ CallArgList args;
+ CGF.CGM.getObjCRuntime().GenerateMessageSendSuper(CGF, ReturnValueSlot(),
+ CGF.getContext().VoidTy,
+ method->getSelector(),
+ iface,
+ /*is category*/ false,
+ self,
+ /*is class msg*/ false,
+ args,
+ method);
+ }
+};
+}
+
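A hedged sketch of what the FinishARCDealloc cleanup, registered in
StartObjCMethod below, buys the user ('_timer' is illustrative):

    - (void)dealloc {
      [_timer invalidate];
      // the cleanup emits the equivalent of [super dealloc] here;
      // writing it explicitly is rejected under -fobjc-arc.
    }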
/// StartObjCMethod - Begin emission of an ObjCMethod. This generates
/// the LLVM function and sets the other context used by
/// CodeGenFunction.
@@ -164,8 +268,21 @@
CurGD = OMD;
StartFunction(OMD, OMD->getResultType(), Fn, FI, args, StartLoc);
+
+ // In ARC, certain methods get an extra cleanup.
+ if (CGM.getLangOptions().ObjCAutoRefCount &&
+ OMD->isInstanceMethod() &&
+ OMD->getSelector().isUnarySelector()) {
+ const IdentifierInfo *ident =
+ OMD->getSelector().getIdentifierInfoForSlot(0);
+ if (ident->isStr("dealloc"))
+ EHStack.pushCleanup<FinishARCDealloc>(getARCCleanupKind());
+ }
}
+static llvm::Value *emitARCRetainLoadOfScalar(CodeGenFunction &CGF,
+ LValue lvalue, QualType type);
+
void CodeGenFunction::GenerateObjCGetterBody(ObjCIvarDecl *Ivar,
bool IsAtomic, bool IsStrong) {
LValue LV = EmitLValueForIvar(TypeOfSelfObject(), LoadObjCSelf(),
@@ -269,6 +386,9 @@
RV = RValue::get(Builder.CreateBitCast(RV.getScalarVal(),
Types.ConvertType(PD->getType())));
EmitReturnOfRValue(RV, PD->getType());
+
+ // objc_getProperty does an autorelease, so we should suppress ours.
+ AutoreleaseResult = false;
} else {
const llvm::Triple &Triple = getContext().Target.getTriple();
QualType IVART = Ivar->getType();
@@ -347,17 +467,23 @@
else {
LValue LV = EmitLValueForIvar(TypeOfSelfObject(), LoadObjCSelf(),
Ivar, 0);
- if (PD->getType()->isReferenceType()) {
- RValue RV = RValue::get(LV.getAddress());
- EmitReturnOfRValue(RV, PD->getType());
+ QualType propType = PD->getType();
+
+ llvm::Value *value;
+ if (propType->isReferenceType()) {
+ value = LV.getAddress();
+ } else {
+ // In ARC, we want to emit this retained.
+ if (getLangOptions().ObjCAutoRefCount &&
+ PD->getType()->isObjCRetainableType())
+ value = emitARCRetainLoadOfScalar(*this, LV, IVART);
+ else
+ value = EmitLoadOfLValue(LV, IVART).getScalarVal();
+
+ value = Builder.CreateBitCast(value, ConvertType(propType));
}
- else {
- CodeGenTypes &Types = CGM.getTypes();
- RValue RV = EmitLoadOfLValue(LV, IVART);
- RV = RValue::get(Builder.CreateBitCast(RV.getScalarVal(),
- Types.ConvertType(PD->getType())));
- EmitReturnOfRValue(RV, PD->getType());
- }
+
+ EmitReturnOfRValue(RValue::get(value), propType);
}
}
@@ -597,6 +723,11 @@
};
}
+static void pushReleaseForIvar(CodeGenFunction &CGF, ObjCIvarDecl *ivar,
+ llvm::Value *self);
+static void pushWeakReleaseForIvar(CodeGenFunction &CGF, ObjCIvarDecl *ivar,
+ llvm::Value *self);
+
static void emitCXXDestructMethod(CodeGenFunction &CGF,
ObjCImplementationDecl *impl) {
CodeGenFunction::RunCleanupsScope scope(CGF);
@@ -631,6 +762,14 @@
CGF.EHStack.pushCleanup<CallIvarDtor>(NormalAndEHCleanup,
ivar, self);
break;
+
+ case QualType::DK_objc_strong_lifetime:
+ pushReleaseForIvar(CGF, ivar, self);
+ break;
+
+ case QualType::DK_objc_weak_lifetime:
+ pushWeakReleaseForIvar(CGF, ivar, self);
+ break;
}
}
@@ -645,6 +784,9 @@
// Emit .cxx_construct.
if (ctor) {
+ // Suppress the final autorelease in ARC.
+ AutoreleaseResult = false;
+
llvm::SmallVector<CXXCtorInitializer *, 8> IvarInitializers;
for (ObjCImplementationDecl::init_const_iterator B = IMP->init_begin(),
E = IMP->init_end(); B != E; ++B) {
@@ -747,6 +889,16 @@
llvm::Value *Receiver = LV.getPropertyRefBaseAddr();
+ if (CGM.getLangOptions().ObjCAutoRefCount) {
+ QualType receiverType;
+ if (E->isSuperReceiver())
+ receiverType = E->getSuperReceiverType();
+ else if (E->isClassReceiver())
+ receiverType = getContext().getObjCClassType();
+ else
+ receiverType = E->getBase()->getType();
+ }
+
// Accesses to 'super' follow a different code path.
if (E->isSuperReceiver())
return AdjustRelatedResultType(*this, E, method,
@@ -757,9 +909,9 @@
const ObjCInterfaceDecl *ReceiverClass
= (E->isClassReceiver() ? E->getClassReceiver() : 0);
return AdjustRelatedResultType(*this, E, method,
- CGM.getObjCRuntime().
- GenerateMessageSend(*this, Return, ResultType, S,
- Receiver, CallArgList(), ReceiverClass));
+ CGM.getObjCRuntime().
+ GenerateMessageSend(*this, Return, ResultType, S,
+ Receiver, CallArgList(), ReceiverClass));
}
void CodeGenFunction::EmitStoreThroughPropertyRefLValue(RValue Src,
@@ -1072,4 +1224,1197 @@
CGM.getObjCRuntime().EmitSynchronizedStmt(*this, S);
}
+/// Produce the code for a CK_ObjCProduceObject. Just does a
+/// primitive retain.
+llvm::Value *CodeGenFunction::EmitObjCProduceObject(QualType type,
+ llvm::Value *value) {
+ return EmitARCRetain(type, value);
+}
+
+namespace {
+ struct CallObjCRelease : EHScopeStack::Cleanup {
+ CallObjCRelease(QualType type, llvm::Value *ptr, llvm::Value *condition)
+ : type(type), ptr(ptr), condition(condition) {}
+ QualType type;
+ llvm::Value *ptr;
+ llvm::Value *condition;
+
+ void Emit(CodeGenFunction &CGF, bool forEH) {
+ llvm::Value *object;
+
+ // If we're in a conditional branch, we had to stash away in an
+ // alloca the pointer to be released.
+ llvm::BasicBlock *cont = 0;
+ if (condition) {
+ llvm::BasicBlock *release = CGF.createBasicBlock("release.yes");
+ cont = CGF.createBasicBlock("release.cont");
+
+ llvm::Value *cond = CGF.Builder.CreateLoad(condition);
+ CGF.Builder.CreateCondBr(cond, release, cont);
+ CGF.EmitBlock(release);
+ object = CGF.Builder.CreateLoad(ptr);
+ } else {
+ object = ptr;
+ }
+
+ CGF.EmitARCRelease(object, /*precise*/ true);
+
+ if (cont) CGF.EmitBlock(cont);
+ }
+ };
+}
+
+/// Produce the code for a CK_ObjCConsumeObject. Does a primitive
+/// release at the end of the full-expression.
+llvm::Value *CodeGenFunction::EmitObjCConsumeObject(QualType type,
+ llvm::Value *object) {
+ // If we're in a conditional branch, we need to make the cleanup
+ // conditional. FIXME: this really needs to be supported by the
+ // environment.
+ llvm::AllocaInst *cond;
+ llvm::Value *ptr;
+ if (isInConditionalBranch()) {
+ cond = CreateTempAlloca(Builder.getInt1Ty(), "release.cond");
+ ptr = CreateTempAlloca(object->getType(), "release.value");
+
+ // The alloca is false until we get here.
+ // FIXME: er. doesn't this need to be set at the start of the condition?
+ InitTempAlloca(cond, Builder.getFalse());
+
+ // Then it turns true.
+ Builder.CreateStore(Builder.getTrue(), cond);
+ Builder.CreateStore(object, ptr);
+ } else {
+ cond = 0;
+ ptr = object;
+ }
+
+ EHStack.pushCleanup<CallObjCRelease>(getARCCleanupKind(), type, ptr, cond);
+ return object;
+}
+
+llvm::Value *CodeGenFunction::EmitObjCExtendObjectLifetime(QualType type,
+ llvm::Value *value) {
+ return EmitARCRetainAutorelease(type, value);
+}
+
+
+static llvm::Constant *createARCRuntimeFunction(CodeGenModule &CGM,
+ const llvm::FunctionType *type,
+ llvm::StringRef fnName) {
+ llvm::Constant *fn = CGM.CreateRuntimeFunction(type, fnName);
+
+ // In -fobjc-no-arc-runtime, emit weak references to the runtime
+ // support library.
+ if (CGM.getLangOptions().ObjCNoAutoRefCountRuntime)
+ if (llvm::Function *f = dyn_cast<llvm::Function>(fn))
+ f->setLinkage(llvm::Function::ExternalWeakLinkage);
+
+ return fn;
+}
+
+/// Perform an operation having the signature
+/// i8* (i8*)
+/// where a null input causes a no-op and returns null.
+static llvm::Value *emitARCValueOperation(CodeGenFunction &CGF,
+ llvm::Value *value,
+ llvm::Constant *&fn,
+ llvm::StringRef fnName) {
+ if (isa<llvm::ConstantPointerNull>(value)) return value;
+
+ if (!fn) {
+ std::vector<const llvm::Type*> args(1, CGF.Int8PtrTy);
+ const llvm::FunctionType *fnType =
+ llvm::FunctionType::get(CGF.Int8PtrTy, args, false);
+ fn = createARCRuntimeFunction(CGF.CGM, fnType, fnName);
+ }
+
+ // Cast the argument to 'id'.
+ const llvm::Type *origType = value->getType();
+ value = CGF.Builder.CreateBitCast(value, CGF.Int8PtrTy);
+
+ // Call the function.
+ llvm::CallInst *call = CGF.Builder.CreateCall(fn, value);
+ call->setDoesNotThrow();
+
+ // Cast the result back to the original type.
+ return CGF.Builder.CreateBitCast(call, origType);
+}
+
+/// Perform an operation having the following signature:
+/// i8* (i8**)
+static llvm::Value *emitARCLoadOperation(CodeGenFunction &CGF,
+ llvm::Value *addr,
+ llvm::Constant *&fn,
+ llvm::StringRef fnName) {
+ if (!fn) {
+ std::vector<const llvm::Type*> args(1, CGF.Int8PtrPtrTy);
+ const llvm::FunctionType *fnType =
+ llvm::FunctionType::get(CGF.Int8PtrTy, args, false);
+ fn = createARCRuntimeFunction(CGF.CGM, fnType, fnName);
+ }
+
+ // Cast the argument to 'id*'.
+ const llvm::Type *origType = addr->getType();
+ addr = CGF.Builder.CreateBitCast(addr, CGF.Int8PtrPtrTy);
+
+ // Call the function.
+ llvm::CallInst *call = CGF.Builder.CreateCall(fn, addr);
+ call->setDoesNotThrow();
+
+ // Cast the result back to a dereference of the original type.
+ llvm::Value *result = call;
+ if (origType != CGF.Int8PtrPtrTy)
+ result = CGF.Builder.CreateBitCast(result,
+ cast<llvm::PointerType>(origType)->getElementType());
+
+ return result;
+}
+
+/// Perform an operation having the following signature:
+/// i8* (i8**, i8*)
+static llvm::Value *emitARCStoreOperation(CodeGenFunction &CGF,
+ llvm::Value *addr,
+ llvm::Value *value,
+ llvm::Constant *&fn,
+ llvm::StringRef fnName,
+ bool ignored) {
+ assert(cast<llvm::PointerType>(addr->getType())->getElementType()
+ == value->getType());
+
+ if (!fn) {
+ std::vector<const llvm::Type*> argTypes(2);
+ argTypes[0] = CGF.Int8PtrPtrTy;
+ argTypes[1] = CGF.Int8PtrTy;
+
+ const llvm::FunctionType *fnType
+ = llvm::FunctionType::get(CGF.Int8PtrTy, argTypes, false);
+ fn = createARCRuntimeFunction(CGF.CGM, fnType, fnName);
+ }
+
+ const llvm::Type *origType = value->getType();
+
+ addr = CGF.Builder.CreateBitCast(addr, CGF.Int8PtrPtrTy);
+ value = CGF.Builder.CreateBitCast(value, CGF.Int8PtrTy);
+
+ llvm::CallInst *result = CGF.Builder.CreateCall2(fn, addr, value);
+ result->setDoesNotThrow();
+
+ if (ignored) return 0;
+
+ return CGF.Builder.CreateBitCast(result, origType);
+}
+
+/// Perform an operation having the following signature:
+/// void (i8**, i8**)
+static void emitARCCopyOperation(CodeGenFunction &CGF,
+ llvm::Value *dst,
+ llvm::Value *src,
+ llvm::Constant *&fn,
+ llvm::StringRef fnName) {
+ assert(dst->getType() == src->getType());
+
+ if (!fn) {
+ std::vector<const llvm::Type*> argTypes(2, CGF.Int8PtrPtrTy);
+ const llvm::FunctionType *fnType
+ = llvm::FunctionType::get(CGF.Builder.getVoidTy(), argTypes, false);
+ fn = createARCRuntimeFunction(CGF.CGM, fnType, fnName);
+ }
+
+ dst = CGF.Builder.CreateBitCast(dst, CGF.Int8PtrPtrTy);
+ src = CGF.Builder.CreateBitCast(src, CGF.Int8PtrPtrTy);
+
+ llvm::CallInst *result = CGF.Builder.CreateCall2(fn, dst, src);
+ result->setDoesNotThrow();
+}
+
+/// Produce the code to do a retain. Based on the type, calls one of:
+/// call i8* @objc_retain(i8* %value)
+/// call i8* @objc_retainBlock(i8* %value)
+llvm::Value *CodeGenFunction::EmitARCRetain(QualType type, llvm::Value *value) {
+ if (type->isBlockPointerType())
+ return EmitARCRetainBlock(value);
+ else
+ return EmitARCRetainNonBlock(value);
+}
+
+/// Retain the given object, with normal retain semantics.
+/// call i8* @objc_retain(i8* %value)
+llvm::Value *CodeGenFunction::EmitARCRetainNonBlock(llvm::Value *value) {
+ return emitARCValueOperation(*this, value,
+ CGM.getARCEntrypoints().objc_retain,
+ "objc_retain");
+}
+
+/// Retain the given block, with _Block_copy semantics.
+/// call i8* @objc_retainBlock(i8* %value)
+llvm::Value *CodeGenFunction::EmitARCRetainBlock(llvm::Value *value) {
+ return emitARCValueOperation(*this, value,
+ CGM.getARCEntrypoints().objc_retainBlock,
+ "objc_retainBlock");
+}
+
+/// Retain the given object which is the result of a function call.
+/// call i8* @objc_retainAutoreleasedReturnValue(i8* %value)
+///
+/// Yes, this function name is one character away from a different
+/// call with completely different semantics.
+llvm::Value *
+CodeGenFunction::EmitARCRetainAutoreleasedReturnValue(llvm::Value *value) {
+ // Fetch the void(void) inline asm which marks that we're going to
+ // retain the autoreleased return value.
+ llvm::InlineAsm *&marker
+ = CGM.getARCEntrypoints().retainAutoreleasedReturnValueMarker;
+ if (!marker) {
+ llvm::StringRef assembly
+ = CGM.getTargetCodeGenInfo()
+ .getARCRetainAutoreleasedReturnValueMarker();
+
+ // If we have an empty assembly string, there's nothing to do.
+ if (assembly.empty()) {
+
+ // Otherwise, at -O0, build an inline asm that we're going to call
+ // in a moment.
+ } else if (CGM.getCodeGenOpts().OptimizationLevel == 0) {
+ llvm::FunctionType *type =
+ llvm::FunctionType::get(llvm::Type::getVoidTy(getLLVMContext()),
+ /*variadic*/ false);
+
+ marker = llvm::InlineAsm::get(type, assembly, "", /*sideeffects*/ true);
+
+ // If we're at -O1 and above, we don't want to litter the code
+ // with this marker yet, so leave a breadcrumb for the ARC
+ // optimizer to pick up.
+ } else {
+ llvm::NamedMDNode *metadata =
+ CGM.getModule().getOrInsertNamedMetadata(
+ "clang.arc.retainAutoreleasedReturnValueMarker");
+ assert(metadata->getNumOperands() <= 1);
+ if (metadata->getNumOperands() == 0) {
+ llvm::Value *string = llvm::MDString::get(getLLVMContext(), assembly);
+ llvm::Value *args[] = { string };
+ metadata->addOperand(llvm::MDNode::get(getLLVMContext(), args));
+ }
+ }
+ }
+
+ // Call the marker asm if we made one, which we do only at -O0.
+ if (marker) Builder.CreateCall(marker);
+
+ return emitARCValueOperation(*this, value,
+ CGM.getARCEntrypoints().objc_retainAutoreleasedReturnValue,
+ "objc_retainAutoreleasedReturnValue");
+}
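A hedged sketch of the resulting sequence for a plain message send
('currentItem' is illustrative):

    id take(id source) {
      id result = [source currentItem];
      // at -O0: a call to objc_msgSend, then the target's marker asm,
      //         then objc_retainAutoreleasedReturnValue on the +0 result;
      // above -O0 only the named-metadata breadcrumb is emitted.
      return result;
    }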
+
+/// Release the given object.
+/// call void @objc_release(i8* %value)
+void CodeGenFunction::EmitARCRelease(llvm::Value *value, bool precise) {
+ if (isa<llvm::ConstantPointerNull>(value)) return;
+
+ llvm::Constant *&fn = CGM.getARCEntrypoints().objc_release;
+ if (!fn) {
+ std::vector<const llvm::Type*> args(1, Int8PtrTy);
+ const llvm::FunctionType *fnType =
+ llvm::FunctionType::get(Builder.getVoidTy(), args, false);
+ fn = createARCRuntimeFunction(CGM, fnType, "objc_release");
+ }
+
+ // Cast the argument to 'id'.
+ value = Builder.CreateBitCast(value, Int8PtrTy);
+
+ // Call objc_release.
+ llvm::CallInst *call = Builder.CreateCall(fn, value);
+ call->setDoesNotThrow();
+
+ if (!precise) {
+ llvm::SmallVector<llvm::Value*,1> args;
+ call->setMetadata("clang.imprecise_release",
+ llvm::MDNode::get(Builder.getContext(), args));
+ }
+}
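A hedged sketch of how the precise flag surfaces, assuming the usual
imprecise cleanup for an ordinary local:

    void hold(id x) {
      __strong id local = x;
      // scope exit: call void @objc_release(i8* %local) with
      // !clang.imprecise_release metadata attached, which licenses the
      // ARC optimizer to move the release earlier.
    }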
+
+/// Store into a strong object. Always calls this:
+/// call void @objc_storeStrong(i8** %addr, i8* %value)
+llvm::Value *CodeGenFunction::EmitARCStoreStrongCall(llvm::Value *addr,
+ llvm::Value *value,
+ bool ignored) {
+ assert(cast<llvm::PointerType>(addr->getType())->getElementType()
+ == value->getType());
+
+ llvm::Constant *&fn = CGM.getARCEntrypoints().objc_storeStrong;
+ if (!fn) {
+ const llvm::Type *argTypes[] = { Int8PtrPtrTy, Int8PtrTy };
+ const llvm::FunctionType *fnType
+ = llvm::FunctionType::get(Builder.getVoidTy(), argTypes, false);
+ fn = createARCRuntimeFunction(CGM, fnType, "objc_storeStrong");
+ }
+
+ addr = Builder.CreateBitCast(addr, Int8PtrPtrTy);
+ llvm::Value *castValue = Builder.CreateBitCast(value, Int8PtrTy);
+
+ Builder.CreateCall2(fn, addr, castValue)->setDoesNotThrow();
+
+ if (ignored) return 0;
+ return value;
+}
+
+/// Store into a strong object. Sometimes calls this:
+/// call void @objc_storeStrong(i8** %addr, i8* %value)
+/// Other times, breaks it down into components.
+llvm::Value *CodeGenFunction::EmitARCStoreStrong(LValue dst, QualType type,
+ llvm::Value *newValue,
+ bool ignored) {
+ bool isBlock = type->isBlockPointerType();
+
+ // Use a store barrier at -O0 unless this is a block type or the
+ // lvalue is inadequately aligned.
+ if (shouldUseFusedARCCalls() &&
+ !isBlock &&
+ !(dst.getAlignment() && dst.getAlignment() < PointerAlignInBytes)) {
+ return EmitARCStoreStrongCall(dst.getAddress(), newValue, ignored);
+ }
+
+ // Otherwise, split it out.
+
+ // Retain the new value.
+ newValue = EmitARCRetain(type, newValue);
+
+ // Read the old value.
+ llvm::Value *oldValue =
+ EmitLoadOfScalar(dst.getAddress(), dst.isVolatileQualified(),
+ dst.getAlignment(), type, dst.getTBAAInfo());
+
+ // Store. We do this before the release so that any deallocs won't
+ // see the old value.
+ EmitStoreOfScalar(newValue, dst.getAddress(),
+ dst.isVolatileQualified(), dst.getAlignment(),
+ type, dst.getTBAAInfo());
+
+ // Finally, release the old value.
+ EmitARCRelease(oldValue, /*precise*/ false);
+
+ return newValue;
+}
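Restated as pseudo-code, the unfused path above performs:

    newValue = objc_retain(newValue);  // retain the incoming value first
    oldValue = *addr;                  // read the old value
    *addr    = newValue;               // store before releasing, so a
    objc_release(oldValue);            // dealloc triggered by the release
                                       // never observes the old value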
+
+/// Autorelease the given object.
+/// call i8* @objc_autorelease(i8* %value)
+llvm::Value *CodeGenFunction::EmitARCAutorelease(llvm::Value *value) {
+ return emitARCValueOperation(*this, value,
+ CGM.getARCEntrypoints().objc_autorelease,
+ "objc_autorelease");
+}
+
+/// Autorelease the given object.
+/// call i8* @objc_autoreleaseReturnValue(i8* %value)
+llvm::Value *
+CodeGenFunction::EmitARCAutoreleaseReturnValue(llvm::Value *value) {
+ return emitARCValueOperation(*this, value,
+ CGM.getARCEntrypoints().objc_autoreleaseReturnValue,
+ "objc_autoreleaseReturnValue");
+}
+
+/// Do a fused retain/autorelease of the given object.
+/// call i8* @objc_retainAutoreleaseReturnValue(i8* %value)
+llvm::Value *
+CodeGenFunction::EmitARCRetainAutoreleaseReturnValue(llvm::Value *value) {
+ return emitARCValueOperation(*this, value,
+ CGM.getARCEntrypoints().objc_retainAutoreleaseReturnValue,
+ "objc_retainAutoreleaseReturnValue");
+}
+
+/// Do a fused retain/autorelease of the given object.
+/// call i8* @objc_retainAutorelease(i8* %value)
+/// or
+/// %retain = call i8* @objc_retainBlock(i8* %value)
+/// call i8* @objc_autorelease(i8* %retain)
+llvm::Value *CodeGenFunction::EmitARCRetainAutorelease(QualType type,
+ llvm::Value *value) {
+ if (!type->isBlockPointerType())
+ return EmitARCRetainAutoreleaseNonBlock(value);
+
+ if (isa<llvm::ConstantPointerNull>(value)) return value;
+
+ const llvm::Type *origType = value->getType();
+ value = Builder.CreateBitCast(value, Int8PtrTy);
+ value = EmitARCRetainBlock(value);
+ value = EmitARCAutorelease(value);
+ return Builder.CreateBitCast(value, origType);
+}
+
+/// Do a fused retain/autorelease of the given object.
+/// call i8* @objc_retainAutorelease(i8* %value)
+llvm::Value *
+CodeGenFunction::EmitARCRetainAutoreleaseNonBlock(llvm::Value *value) {
+ return emitARCValueOperation(*this, value,
+ CGM.getARCEntrypoints().objc_retainAutorelease,
+ "objc_retainAutorelease");
+}
+
+/// i8* @objc_loadWeak(i8** %addr)
+/// Essentially objc_autorelease(objc_loadWeakRetained(addr)).
+llvm::Value *CodeGenFunction::EmitARCLoadWeak(llvm::Value *addr) {
+ return emitARCLoadOperation(*this, addr,
+ CGM.getARCEntrypoints().objc_loadWeak,
+ "objc_loadWeak");
+}
+
+/// i8* @objc_loadWeakRetained(i8** %addr)
+llvm::Value *CodeGenFunction::EmitARCLoadWeakRetained(llvm::Value *addr) {
+ return emitARCLoadOperation(*this, addr,
+ CGM.getARCEntrypoints().objc_loadWeakRetained,
+ "objc_loadWeakRetained");
+}
+
+/// i8* @objc_storeWeak(i8** %addr, i8* %value)
+/// Returns %value.
+llvm::Value *CodeGenFunction::EmitARCStoreWeak(llvm::Value *addr,
+ llvm::Value *value,
+ bool ignored) {
+ return emitARCStoreOperation(*this, addr, value,
+ CGM.getARCEntrypoints().objc_storeWeak,
+ "objc_storeWeak", ignored);
+}
+
+/// i8* @objc_initWeak(i8** %addr, i8* %value)
+/// Returns %value. %addr is known to not have a current weak entry.
+/// Essentially equivalent to:
+/// *addr = nil; objc_storeWeak(addr, value);
+void CodeGenFunction::EmitARCInitWeak(llvm::Value *addr, llvm::Value *value) {
+ // If we're initializing to null, just write null to memory; no need
+ // to get the runtime involved. But don't do this if optimization
+ // is enabled, because accounting for this would make the optimizer
+ // much more complicated.
+ if (isa<llvm::ConstantPointerNull>(value) &&
+ CGM.getCodeGenOpts().OptimizationLevel == 0) {
+ Builder.CreateStore(value, addr);
+ return;
+ }
+
+ emitARCStoreOperation(*this, addr, value,
+ CGM.getARCEntrypoints().objc_initWeak,
+ "objc_initWeak", /*ignored*/ true);
+}
+
+/// void @objc_destroyWeak(i8** %addr)
+/// Essentially objc_storeWeak(addr, nil).
+void CodeGenFunction::EmitARCDestroyWeak(llvm::Value *addr) {
+ llvm::Constant *&fn = CGM.getARCEntrypoints().objc_destroyWeak;
+ if (!fn) {
+ std::vector<const llvm::Type*> args(1, Int8PtrPtrTy);
+ const llvm::FunctionType *fnType =
+ llvm::FunctionType::get(Builder.getVoidTy(), args, false);
+ fn = createARCRuntimeFunction(CGM, fnType, "objc_destroyWeak");
+ }
+
+ // Cast the argument to 'id*'.
+ addr = Builder.CreateBitCast(addr, Int8PtrPtrTy);
+
+ llvm::CallInst *call = Builder.CreateCall(fn, addr);
+ call->setDoesNotThrow();
+}
+
+/// void @objc_moveWeak(i8** %dest, i8** %src)
+/// Disregards the current value in %dest. Leaves %src pointing to nothing.
+/// Essentially (objc_copyWeak(dest, src), objc_destroyWeak(src)).
+void CodeGenFunction::EmitARCMoveWeak(llvm::Value *dst, llvm::Value *src) {
+ emitARCCopyOperation(*this, dst, src,
+ CGM.getARCEntrypoints().objc_moveWeak,
+ "objc_moveWeak");
+}
+
+/// void @objc_copyWeak(i8** %dest, i8** %src)
+/// Disregards the current value in %dest. Essentially
+/// objc_release(objc_initWeak(dest, objc_loadWeakRetained(src)))
+void CodeGenFunction::EmitARCCopyWeak(llvm::Value *dst, llvm::Value *src) {
+ emitARCCopyOperation(*this, dst, src,
+ CGM.getARCEntrypoints().objc_copyWeak,
+ "objc_copyWeak");
+}
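A hedged sketch mapping __weak source constructs onto the entry points above:

    void weak_ops(id x) {
      __weak id w = x;   // objc_initWeak(&w, x)
      id s = w;          // objc_loadWeakRetained(&w); the +1 result
                         // initializes the strong local 's'
      w = s;             // objc_storeWeak(&w, s)
    }                    // scope exit: objc_destroyWeak(&w)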
+
+/// Produce the code to do an objc_autoreleasePoolPush.
+/// call i8* @objc_autoreleasePoolPush(void)
+llvm::Value *CodeGenFunction::EmitObjCAutoreleasePoolPush() {
+ llvm::Constant *&fn = CGM.getRREntrypoints().objc_autoreleasePoolPush;
+ if (!fn) {
+ const llvm::FunctionType *fnType =
+ llvm::FunctionType::get(Int8PtrTy, false);
+ fn = createARCRuntimeFunction(CGM, fnType, "objc_autoreleasePoolPush");
+ }
+
+ llvm::CallInst *call = Builder.CreateCall(fn);
+ call->setDoesNotThrow();
+
+ return call;
+}
+
+/// Produce the code to do an autorelease pool pop.
+/// call void @objc_autoreleasePoolPop(i8* %ptr)
+void CodeGenFunction::EmitObjCAutoreleasePoolPop(llvm::Value *value) {
+ assert(value->getType() == Int8PtrTy);
+
+ llvm::Constant *&fn = CGM.getRREntrypoints().objc_autoreleasePoolPop;
+ if (!fn) {
+ std::vector<const llvm::Type*> args(1, Int8PtrTy);
+ const llvm::FunctionType *fnType =
+ llvm::FunctionType::get(Builder.getVoidTy(), args, false);
+
+ // We don't want to use a weak import here; instead we should not
+ // fall into this path.
+ fn = createARCRuntimeFunction(CGM, fnType, "objc_autoreleasePoolPop");
+ }
+
+ llvm::CallInst *call = Builder.CreateCall(fn, value);
+ call->setDoesNotThrow();
+}
+
+/// Produce the code to do the MRR version of objc_autoreleasePoolPush:
+/// [[NSAutoreleasePool alloc] init];
+/// where alloc is declared as + (id) alloc; in the NSAutoreleasePool class,
+/// and init is declared as - (id) init; in its NSObject superclass.
+///
+llvm::Value *CodeGenFunction::EmitObjCMRRAutoreleasePoolPush() {
+ CGObjCRuntime &Runtime = CGM.getObjCRuntime();
+ llvm::Value *Receiver = Runtime.EmitNSAutoreleasePoolClassRef(Builder);
+ // [NSAutoreleasePool alloc]
+ IdentifierInfo *II = &CGM.getContext().Idents.get("alloc");
+ Selector AllocSel = getContext().Selectors.getSelector(0, &II);
+ CallArgList Args;
+ RValue AllocRV =
+ Runtime.GenerateMessageSend(*this, ReturnValueSlot(),
+ getContext().getObjCIdType(),
+ AllocSel, Receiver, Args);
+
+ // [Receiver init]
+ Receiver = AllocRV.getScalarVal();
+ II = &CGM.getContext().Idents.get("init");
+ Selector InitSel = getContext().Selectors.getSelector(0, &II);
+ RValue InitRV =
+ Runtime.GenerateMessageSend(*this, ReturnValueSlot(),
+ getContext().getObjCIdType(),
+ InitSel, Receiver, Args);
+ return InitRV.getScalarVal();
+}
+
+/// Produce the code to do the MRR version of an autorelease pool pop:
+/// [tmp drain];
+void CodeGenFunction::EmitObjCMRRAutoreleasePoolPop(llvm::Value *Arg) {
+ IdentifierInfo *II = &CGM.getContext().Idents.get("drain");
+ Selector DrainSel = getContext().Selectors.getSelector(0, &II);
+ CallArgList Args;
+ CGM.getObjCRuntime().GenerateMessageSend(*this, ReturnValueSlot(),
+ getContext().VoidTy, DrainSel, Arg, Args);
+}
+
+namespace {
+ struct ObjCReleasingCleanup : EHScopeStack::Cleanup {
+ private:
+ QualType type;
+ llvm::Value *addr;
+
+ protected:
+ ObjCReleasingCleanup(QualType type, llvm::Value *addr)
+ : type(type), addr(addr) {}
+
+ virtual llvm::Value *getAddress(CodeGenFunction &CGF,
+ llvm::Value *addr) {
+ return addr;
+ }
+
+ virtual void release(CodeGenFunction &CGF,
+ QualType type,
+ llvm::Value *addr) = 0;
+
+ public:
+ void Emit(CodeGenFunction &CGF, bool isForEH) {
+ const ArrayType *arrayType = CGF.getContext().getAsArrayType(type);
+
+ llvm::Value *addr = getAddress(CGF, this->addr);
+
+ // If we don't have an array type, this is easy.
+ if (!arrayType)
+ return release(CGF, type, addr);
+
+ llvm::Value *begin = addr;
+ QualType baseType;
+
+ // Otherwise, this is more painful.
+ llvm::Value *count = emitArrayLength(CGF, arrayType, baseType,
+ begin);
+
+ assert(baseType == CGF.getContext().getBaseElementType(arrayType));
+
+ llvm::BasicBlock *incomingBB = CGF.Builder.GetInsertBlock();
+
+ // id *cur = begin;
+ // id *end = begin + count;
+ llvm::Value *end =
+ CGF.Builder.CreateInBoundsGEP(begin, count, "array.end");
+
+ // loopBB:
+ llvm::BasicBlock *loopBB = CGF.createBasicBlock("release-loop");
+ CGF.EmitBlock(loopBB);
+
+ llvm::PHINode *cur = CGF.Builder.CreatePHI(begin->getType(), 2, "cur");
+ cur->addIncoming(begin, incomingBB);
+
+ // if (cur == end) goto endBB;
+ llvm::Value *eq = CGF.Builder.CreateICmpEQ(cur, end, "release-loop.done");
+ llvm::BasicBlock *bodyBB = CGF.createBasicBlock("release-loop.body");
+ llvm::BasicBlock *endBB = CGF.createBasicBlock("release-loop.cont");
+ CGF.Builder.CreateCondBr(eq, endBB, bodyBB);
+ CGF.EmitBlock(bodyBB);
+
+ // Release the value at 'cur'.
+ release(CGF, baseType, cur);
+
+ // ++cur;
+ // goto loopBB;
+ llvm::Value *next = CGF.Builder.CreateConstInBoundsGEP1_32(cur, 1);
+ cur->addIncoming(next, CGF.Builder.GetInsertBlock());
+ CGF.Builder.CreateBr(loopBB);
+
+ // endBB:
+ CGF.EmitBlock(endBB);
+ }
+
+ private:
+ /// Computes the length of an array in elements, as well as the
+ /// base element type; adjusts addr to point at the first element.
+ static llvm::Value *emitArrayLength(CodeGenFunction &CGF,
+ const ArrayType *origArrayType,
+ QualType &baseType,
+ llvm::Value *&addr) {
+ ASTContext &Ctx = CGF.getContext();
+ const ArrayType *arrayType = origArrayType;
+
+ // If it's a VLA, we have to load the stored size. Note that
+ // this is the size of the VLA in bytes, not its size in elements.
+ llvm::Value *vlaSizeInBytes = 0;
+ if (isa<VariableArrayType>(arrayType)) {
+ vlaSizeInBytes = CGF.GetVLASize(cast<VariableArrayType>(arrayType));
+
+ // Walk into all VLAs. This doesn't require changes to addr,
+ // which has type T* where T is the first non-VLA element type.
+ do {
+ QualType elementType = arrayType->getElementType();
+ arrayType = Ctx.getAsArrayType(elementType);
+
+ // If we only have VLA components, 'addr' requires no adjustment.
+ if (!arrayType) {
+ baseType = elementType;
+ return divideVLASizeByBaseType(CGF, vlaSizeInBytes, baseType);
+ }
+ } while (isa<VariableArrayType>(arrayType));
+
+ // We get out here only if we find a constant array type
+ // inside the VLA.
+ }
+
+ // We have some number of constant-length arrays, so addr should
+ // have LLVM type [M x [N x [...]]]*. Build a GEP that walks
+ // down to the first element of addr.
+ llvm::SmallVector<llvm::Value*, 8> gepIndices;
+
+ // GEP down to the array type.
+ llvm::ConstantInt *zero = CGF.Builder.getInt32(0);
+ gepIndices.push_back(zero);
+
+ // It's more efficient to calculate the count from the LLVM
+ // constant-length arrays than to re-evaluate the array bounds.
+ uint64_t countFromCLAs = 1;
+
+ const llvm::ArrayType *llvmArrayType =
+ cast<llvm::ArrayType>(
+ cast<llvm::PointerType>(addr->getType())->getElementType());
+ while (true) {
+ assert(isa<ConstantArrayType>(arrayType));
+ assert(cast<ConstantArrayType>(arrayType)->getSize().getZExtValue()
+ == llvmArrayType->getNumElements());
+
+ gepIndices.push_back(zero);
+ countFromCLAs *= llvmArrayType->getNumElements();
+
+ llvmArrayType =
+ dyn_cast<llvm::ArrayType>(llvmArrayType->getElementType());
+ if (!llvmArrayType) break;
+
+ arrayType = Ctx.getAsArrayType(arrayType->getElementType());
+ assert(arrayType && "LLVM and Clang types are out-of-synch");
+ }
+
+ // Create the actual GEP.
+ addr = CGF.Builder.CreateInBoundsGEP(addr, gepIndices.begin(),
+ gepIndices.end(), "array.begin");
+
+ baseType = arrayType->getElementType();
+
+ // If we had any VLA dimensions, we need to use the captured size.
+ if (vlaSizeInBytes)
+ return divideVLASizeByBaseType(CGF, vlaSizeInBytes, baseType);
+
+ // Otherwise, use countFromCLAs.
+ assert(countFromCLAs == (uint64_t)
+ (Ctx.getTypeSizeInChars(origArrayType).getQuantity() /
+ Ctx.getTypeSizeInChars(baseType).getQuantity()));
+
+ return llvm::ConstantInt::get(CGF.IntPtrTy, countFromCLAs);
+ }
+
+ static llvm::Value *divideVLASizeByBaseType(CodeGenFunction &CGF,
+ llvm::Value *vlaSizeInBytes,
+ QualType baseType) {
+ // Divide the base type size back out of the VLA size in bytes.
+ CharUnits baseSize = CGF.getContext().getTypeSizeInChars(baseType);
+ llvm::Value *baseSizeInBytes =
+ llvm::ConstantInt::get(vlaSizeInBytes->getType(),
+ baseSize.getQuantity());
+
+ return CGF.Builder.CreateUDiv(vlaSizeInBytes, baseSizeInBytes,
+ "array.vla-count");
+ }
+ };
+
+ /// A cleanup that calls @objc_release on all the objects to release.
+ struct CallReleaseForObject : ObjCReleasingCleanup {
+ bool precise;
+ CallReleaseForObject(QualType type, llvm::Value *addr, bool precise)
+ : ObjCReleasingCleanup(type, addr), precise(precise) {}
+
+ void release(CodeGenFunction &CGF, QualType type, llvm::Value *addr) {
+ llvm::Value *ptr = CGF.Builder.CreateLoad(addr, "tmp");
+ CGF.EmitARCRelease(ptr, precise);
+ }
+ };
+
+ /// A cleanup that calls @objc_storeStrong(nil) on all the objects to
+ /// release in an ivar.
+ struct CallReleaseForIvar : ObjCReleasingCleanup {
+ const ObjCIvarDecl *ivar;
+ CallReleaseForIvar(const ObjCIvarDecl *ivar, llvm::Value *self)
+ : ObjCReleasingCleanup(ivar->getType(), self), ivar(ivar) {}
+
+ llvm::Value *getAddress(CodeGenFunction &CGF, llvm::Value *addr) {
+ LValue lvalue
+ = CGF.EmitLValueForIvar(CGF.TypeOfSelfObject(), addr, ivar, /*CVR*/ 0);
+ return lvalue.getAddress();
+ }
+
+ void release(CodeGenFunction &CGF, QualType type, llvm::Value *addr) {
+ // Release ivars by storing nil into them; it just makes things easier.
+ llvm::Value *null = getNullForVariable(addr);
+ CGF.EmitARCStoreStrongCall(addr, null, /*ignored*/ true);
+ }
+ };
+
+ /// A cleanup that calls @objc_release on all of the objects to release in
+ /// a field.
+ struct CallReleaseForField : CallReleaseForObject {
+ const FieldDecl *Field;
+
+ explicit CallReleaseForField(const FieldDecl *Field)
+ : CallReleaseForObject(Field->getType(), 0, /*precise=*/true),
+ Field(Field) { }
+
+ llvm::Value *getAddress(CodeGenFunction &CGF, llvm::Value *) {
+ llvm::Value *This = CGF.LoadCXXThis();
+ LValue LV = CGF.EmitLValueForField(This, Field, 0);
+ return LV.getAddress();
+ }
+ };
+
+ /// A cleanup that calls @objc_destroyWeak on all the objects to
+ /// release in an object.
+ struct CallWeakReleaseForObject : ObjCReleasingCleanup {
+ CallWeakReleaseForObject(QualType type, llvm::Value *addr)
+ : ObjCReleasingCleanup(type, addr) {}
+
+ void release(CodeGenFunction &CGF, QualType type, llvm::Value *addr) {
+ CGF.EmitARCDestroyWeak(addr);
+ }
+ };
+
+
+ /// A cleanup that calls @objc_destroyWeak on all the objects to
+ /// release in an ivar.
+ struct CallWeakReleaseForIvar : CallWeakReleaseForObject {
+ const ObjCIvarDecl *ivar;
+ CallWeakReleaseForIvar(const ObjCIvarDecl *ivar, llvm::Value *self)
+ : CallWeakReleaseForObject(ivar->getType(), self), ivar(ivar) {}
+
+ llvm::Value *getAddress(CodeGenFunction &CGF, llvm::Value *addr) {
+ LValue lvalue
+ = CGF.EmitLValueForIvar(CGF.TypeOfSelfObject(), addr, ivar, /*CVR*/ 0);
+ return lvalue.getAddress();
+ }
+ };
+
+ /// A cleanup that calls @objc_destroyWeak on all the objects to
+ /// release in a field.
+ struct CallWeakReleaseForField : CallWeakReleaseForObject {
+ const FieldDecl *Field;
+ CallWeakReleaseForField(const FieldDecl *Field)
+ : CallWeakReleaseForObject(Field->getType(), 0), Field(Field) {}
+
+ llvm::Value *getAddress(CodeGenFunction &CGF, llvm::Value *) {
+ llvm::Value *This = CGF.LoadCXXThis();
+ LValue LV = CGF.EmitLValueForField(This, Field, 0);
+ return LV.getAddress();
+ }
+ };
+
+ struct CallObjCAutoreleasePoolObject : EHScopeStack::Cleanup {
+ llvm::Value *Token;
+
+ CallObjCAutoreleasePoolObject(llvm::Value *token) : Token(token) {}
+
+ void Emit(CodeGenFunction &CGF, bool isForEH) {
+ CGF.EmitObjCAutoreleasePoolPop(Token);
+ }
+ };
+ struct CallObjCMRRAutoreleasePoolObject : EHScopeStack::Cleanup {
+ llvm::Value *Token;
+
+ CallObjCMRRAutoreleasePoolObject(llvm::Value *token) : Token(token) {}
+
+ void Emit(CodeGenFunction &CGF, bool isForEH) {
+ CGF.EmitObjCMRRAutoreleasePoolPop(Token);
+ }
+ };
+}
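A hedged sketch of a declaration whose teardown goes through the
element-wise loop in ObjCReleasingCleanup::Emit (assuming locals get these
cleanups; the array is illustrative):

    void locals(void) {
      __strong id objects[4];
      // ... populate objects ...
    }   // scope exit: walk from &objects[0] to &objects[4], releasing each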
+
+void CodeGenFunction::EmitObjCAutoreleasePoolCleanup(llvm::Value *Ptr) {
+ if (CGM.getLangOptions().ObjCAutoRefCount)
+ EHStack.pushCleanup<CallObjCAutoreleasePoolObject>(NormalCleanup, Ptr);
+ else
+ EHStack.pushCleanup<CallObjCMRRAutoreleasePoolObject>(NormalCleanup, Ptr);
+}
+
+/// PushARCReleaseCleanup - Enter a cleanup to perform a release on a
+/// given object or array of objects.
+void CodeGenFunction::PushARCReleaseCleanup(CleanupKind cleanupKind,
+ QualType type,
+ llvm::Value *addr,
+ bool precise) {
+ EHStack.pushCleanup<CallReleaseForObject>(cleanupKind, type, addr, precise);
+}
+
+/// PushARCWeakReleaseCleanup - Enter a cleanup to perform a weak
+/// release on the given object or array of objects.
+void CodeGenFunction::PushARCWeakReleaseCleanup(CleanupKind cleanupKind,
+ QualType type,
+ llvm::Value *addr) {
+ EHStack.pushCleanup<CallWeakReleaseForObject>(cleanupKind, type, addr);
+}
+
+/// PushARCFieldReleaseCleanup - Enter a cleanup to perform a release
+/// on the given field of the current object.
+void CodeGenFunction::PushARCFieldReleaseCleanup(CleanupKind cleanupKind,
+ const FieldDecl *field) {
+ EHStack.pushCleanup<CallReleaseForField>(cleanupKind, field);
+}
+
+/// PushARCFieldWeakReleaseCleanup - Enter a cleanup to perform a weak
+/// release on the given field of the current object.
+void CodeGenFunction::PushARCFieldWeakReleaseCleanup(CleanupKind cleanupKind,
+ const FieldDecl *field) {
+ EHStack.pushCleanup<CallWeakReleaseForField>(cleanupKind, field);
+}
+
+static void pushReleaseForIvar(CodeGenFunction &CGF, ObjCIvarDecl *ivar,
+ llvm::Value *self) {
+ CGF.EHStack.pushCleanup<CallReleaseForIvar>(CGF.getARCCleanupKind(),
+ ivar, self);
+}
+
+static void pushWeakReleaseForIvar(CodeGenFunction &CGF, ObjCIvarDecl *ivar,
+ llvm::Value *self) {
+ CGF.EHStack.pushCleanup<CallWeakReleaseForIvar>(CGF.getARCCleanupKind(),
+ ivar, self);
+}
+
+static TryEmitResult tryEmitARCRetainLoadOfScalar(CodeGenFunction &CGF,
+ LValue lvalue,
+ QualType type) {
+ switch (type.getObjCLifetime()) {
+ case Qualifiers::OCL_None:
+ case Qualifiers::OCL_ExplicitNone:
+ case Qualifiers::OCL_Strong:
+ case Qualifiers::OCL_Autoreleasing:
+ return TryEmitResult(CGF.EmitLoadOfLValue(lvalue, type).getScalarVal(),
+ false);
+
+ case Qualifiers::OCL_Weak:
+ return TryEmitResult(CGF.EmitARCLoadWeakRetained(lvalue.getAddress()),
+ true);
+ }
+
+ llvm_unreachable("impossible lifetime!");
+ return TryEmitResult();
+}
+
+static TryEmitResult tryEmitARCRetainLoadOfScalar(CodeGenFunction &CGF,
+ const Expr *e) {
+ e = e->IgnoreParens();
+ QualType type = e->getType();
+
+ // As a very special optimization, in ARC++, if the l-value is the
+ // result of a non-volatile assignment, do a simple retain of the
+ // result of the call to objc_storeWeak instead of reloading.
+ if (CGF.getLangOptions().CPlusPlus &&
+ !type.isVolatileQualified() &&
+ type.getObjCLifetime() == Qualifiers::OCL_Weak &&
+ isa<BinaryOperator>(e) &&
+ cast<BinaryOperator>(e)->getOpcode() == BO_Assign)
+ return TryEmitResult(CGF.EmitScalarExpr(e), false);
+
+ return tryEmitARCRetainLoadOfScalar(CGF, CGF.EmitLValue(e), type);
+}
+
+static llvm::Value *emitARCRetainAfterCall(CodeGenFunction &CGF,
+ llvm::Value *value);
+
+/// Given an expression that is some sort of call (which does not
+/// return retained), emit a retain following it.
+static llvm::Value *emitARCRetainCall(CodeGenFunction &CGF, const Expr *e) {
+ llvm::Value *value = CGF.EmitScalarExpr(e);
+ return emitARCRetainAfterCall(CGF, value);
+}
+
+static llvm::Value *emitARCRetainAfterCall(CodeGenFunction &CGF,
+ llvm::Value *value) {
+ if (llvm::CallInst *call = dyn_cast<llvm::CallInst>(value)) {
+ CGBuilderTy::InsertPoint ip = CGF.Builder.saveIP();
+
+ // Place the retain immediately following the call.
+ CGF.Builder.SetInsertPoint(call->getParent(),
+ ++llvm::BasicBlock::iterator(call));
+ value = CGF.EmitARCRetainAutoreleasedReturnValue(value);
+
+ CGF.Builder.restoreIP(ip);
+ return value;
+ } else if (llvm::InvokeInst *invoke = dyn_cast<llvm::InvokeInst>(value)) {
+ CGBuilderTy::InsertPoint ip = CGF.Builder.saveIP();
+
+ // Place the retain at the beginning of the normal destination block.
+ llvm::BasicBlock *BB = invoke->getNormalDest();
+ CGF.Builder.SetInsertPoint(BB, BB->begin());
+ value = CGF.EmitARCRetainAutoreleasedReturnValue(value);
+
+ CGF.Builder.restoreIP(ip);
+ return value;
+
+ // Bitcasts can arise because of related-result returns. Rewrite
+ // the operand.
+ } else if (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(value)) {
+ llvm::Value *operand = bitcast->getOperand(0);
+ operand = emitARCRetainAfterCall(CGF, operand);
+ bitcast->setOperand(0, operand);
+ return bitcast;
+
+ // Generic fall-back case.
+ } else {
+ // Retain using the non-block variant: we never need to do a copy
+ // of a block that's been returned to us.
+ return CGF.EmitARCRetainNonBlock(value);
+ }
+}
+
+static TryEmitResult
+tryEmitARCRetainScalarExpr(CodeGenFunction &CGF, const Expr *e) {
+ QualType originalType = e->getType();
+
+ // The desired result type, if it differs from the type of the
+ // ultimate opaque expression.
+ const llvm::Type *resultType = 0;
+
+ while (true) {
+ e = e->IgnoreParens();
+
+ // There's a break at the end of this if-chain; anything
+ // that wants to keep looping has to explicitly continue.
+ if (const CastExpr *ce = dyn_cast<CastExpr>(e)) {
+ switch (ce->getCastKind()) {
+ // No-op casts don't change the type, so we just ignore them.
+ case CK_NoOp:
+ e = ce->getSubExpr();
+ continue;
+
+ case CK_LValueToRValue: {
+ TryEmitResult loadResult
+ = tryEmitARCRetainLoadOfScalar(CGF, ce->getSubExpr());
+ if (resultType) {
+ llvm::Value *value = loadResult.getPointer();
+ value = CGF.Builder.CreateBitCast(value, resultType);
+ loadResult.setPointer(value);
+ }
+ return loadResult;
+ }
+
+ // These casts can change the type, so remember that and
+ // soldier on. We only need to remember the outermost such
+ // cast, though.
+ case CK_AnyPointerToObjCPointerCast:
+ case CK_AnyPointerToBlockPointerCast:
+ case CK_BitCast:
+ if (!resultType)
+ resultType = CGF.ConvertType(ce->getType());
+ e = ce->getSubExpr();
+ assert(e->getType()->hasPointerRepresentation());
+ continue;
+
+ // For consumptions, just emit the subexpression and thus elide
+ // the retain/release pair.
+ case CK_ObjCConsumeObject: {
+ llvm::Value *result = CGF.EmitScalarExpr(ce->getSubExpr());
+ if (resultType) result = CGF.Builder.CreateBitCast(result, resultType);
+ return TryEmitResult(result, true);
+ }
+
+ case CK_GetObjCProperty: {
+ llvm::Value *result = emitARCRetainCall(CGF, ce);
+ if (resultType) result = CGF.Builder.CreateBitCast(result, resultType);
+ return TryEmitResult(result, true);
+ }
+
+ default:
+ break;
+ }
+
+ // Skip __extension__.
+ } else if (const UnaryOperator *op = dyn_cast<UnaryOperator>(e)) {
+ if (op->getOpcode() == UO_Extension) {
+ e = op->getSubExpr();
+ continue;
+ }
+
+ // For calls and message sends, use the retained-call logic.
+ // Delegate inits are a special case in that they're the only
+ // returns-retained expression that *isn't* surrounded by
+ // a consume.
+ } else if (isa<CallExpr>(e) ||
+ (isa<ObjCMessageExpr>(e) &&
+ !cast<ObjCMessageExpr>(e)->isDelegateInitCall())) {
+ llvm::Value *result = emitARCRetainCall(CGF, e);
+ if (resultType) result = CGF.Builder.CreateBitCast(result, resultType);
+ return TryEmitResult(result, true);
+ }
+
+ // Conservatively halt the search at any other expression kind.
+ break;
+ }
+
+ // We didn't find an obvious production, so emit what we've got and
+ // tell the caller that we didn't manage to retain.
+ llvm::Value *result = CGF.EmitScalarExpr(e);
+ if (resultType) result = CGF.Builder.CreateBitCast(result, resultType);
+ return TryEmitResult(result, false);
+}
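A hedged sketch of an elision this peephole enables (roughly; the exact AST
shape depends on Sema):

    void peephole(id obj) {
      id c = [obj copy];   // -copy already returns at +1; the consume is
                           // emitted directly, eliding the retain of 'c'
                           // and the matching release of the temporary
    }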
+
+static llvm::Value *emitARCRetainLoadOfScalar(CodeGenFunction &CGF,
+ LValue lvalue,
+ QualType type) {
+ TryEmitResult result = tryEmitARCRetainLoadOfScalar(CGF, lvalue, type);
+ llvm::Value *value = result.getPointer();
+ if (!result.getInt())
+ value = CGF.EmitARCRetain(type, value);
+ return value;
+}
+
+/// EmitARCRetainScalarExpr - Semantically equivalent to
+/// EmitARCRetain(e->getType(), EmitScalarExpr(e)), but making a
+/// best-effort attempt to peephole expressions that naturally produce
+/// retained objects.
+llvm::Value *CodeGenFunction::EmitARCRetainScalarExpr(const Expr *e) {
+ TryEmitResult result = tryEmitARCRetainScalarExpr(*this, e);
+ llvm::Value *value = result.getPointer();
+ if (!result.getInt())
+ value = EmitARCRetain(e->getType(), value);
+ return value;
+}
+
+llvm::Value *
+CodeGenFunction::EmitARCRetainAutoreleaseScalarExpr(const Expr *e) {
+ TryEmitResult result = tryEmitARCRetainScalarExpr(*this, e);
+ llvm::Value *value = result.getPointer();
+ if (result.getInt())
+ value = EmitARCAutorelease(value);
+ else
+ value = EmitARCRetainAutorelease(e->getType(), value);
+ return value;
+}
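+
+// Note the asymmetry above: a value already at +1 only needs the
+// autorelease, while a +0 value takes the fused retain-autorelease path.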
+
+std::pair<LValue,llvm::Value*>
+CodeGenFunction::EmitARCStoreStrong(const BinaryOperator *e,
+ bool ignored) {
+ // Evaluate the RHS first.
+ TryEmitResult result = tryEmitARCRetainScalarExpr(*this, e->getRHS());
+ llvm::Value *value = result.getPointer();
+
+ LValue lvalue = EmitLValue(e->getLHS());
+
+ // If the RHS was emitted retained, expand this.
+ if (result.getInt()) {
+ llvm::Value *oldValue =
+ EmitLoadOfScalar(lvalue.getAddress(), lvalue.isVolatileQualified(),
+ lvalue.getAlignment(), e->getType(),
+ lvalue.getTBAAInfo());
+ EmitStoreOfScalar(value, lvalue.getAddress(),
+ lvalue.isVolatileQualified(), lvalue.getAlignment(),
+ e->getType(), lvalue.getTBAAInfo());
+ EmitARCRelease(oldValue, /*precise*/ false);
+ } else {
+ value = EmitARCStoreStrong(lvalue, e->getType(), value, ignored);
+ }
+
+ return std::pair<LValue,llvm::Value*>(lvalue, value);
+}
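+
+// That is, for a strong assignment 'x = y' whose RHS was emitted
+// retained, we expand inline to load-old, store-new, release-old;
+// otherwise we defer to the EmitARCStoreStrong(LValue, ...) overload,
+// which can use the fused objc_storeStrong entrypoint.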
+
+std::pair<LValue,llvm::Value*>
+CodeGenFunction::EmitARCStoreAutoreleasing(const BinaryOperator *e) {
+ llvm::Value *value = EmitARCRetainAutoreleaseScalarExpr(e->getRHS());
+ LValue lvalue = EmitLValue(e->getLHS());
+
+ EmitStoreOfScalar(value, lvalue.getAddress(),
+ lvalue.isVolatileQualified(), lvalue.getAlignment(),
+ e->getType(), lvalue.getTBAAInfo());
+
+ return std::pair<LValue,llvm::Value*>(lvalue, value);
+}
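+
+// For example (illustrative declaration): given 'NSError * __autoreleasing
+// *err', the store '*err = e;' assigns a retained-and-autoreleased value,
+// so the pointee remains valid for the caller without transferring
+// ownership.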
+
+void CodeGenFunction::EmitObjCAutoreleasePoolStmt(
+ const ObjCAutoreleasePoolStmt &ARPS) {
+ const Stmt *subStmt = ARPS.getSubStmt();
+ const CompoundStmt &S = cast<CompoundStmt>(*subStmt);
+
+ CGDebugInfo *DI = getDebugInfo();
+ if (DI) {
+ DI->setLocation(S.getLBracLoc());
+ DI->EmitRegionStart(Builder);
+ }
+
+ // Keep track of the current cleanup stack depth.
+ RunCleanupsScope Scope(*this);
+ const llvm::Triple Triple = getContext().Target.getTriple();
+ if (CGM.getLangOptions().ObjCAutoRefCount ||
+ (CGM.isTargetDarwin() &&
+ ((Triple.getArch() == llvm::Triple::x86_64 &&
+ Triple.getDarwinMajorNumber() >= 11)
+ || (Triple.getEnvironmentName() == "iphoneos" &&
+ Triple.getDarwinMajorNumber() >= 5)))) {
+ llvm::Value *token = EmitObjCAutoreleasePoolPush();
+ EHStack.pushCleanup<CallObjCAutoreleasePoolObject>(NormalCleanup, token);
+ } else {
+ llvm::Value *token = EmitObjCMRRAutoreleasePoolPush();
+ EHStack.pushCleanup<CallObjCMRRAutoreleasePoolObject>(NormalCleanup, token);
+ }
+
+ for (CompoundStmt::const_body_iterator I = S.body_begin(),
+ E = S.body_end(); I != E; ++I)
+ EmitStmt(*I);
+
+ if (DI) {
+ DI->setLocation(S.getRBracLoc());
+ DI->EmitRegionEnd(Builder);
+ }
+}
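+
+// In outline, '@autoreleasepool { body }' lowers to:
+//   void *token = objc_autoreleasePoolPush();
+//   body;
+//   objc_autoreleasePoolPop(token); // via the pushed cleanup
+// with the MRR fallback instead messaging the NSAutoreleasePool class
+// (see EmitNSAutoreleasePoolClassRef in CGObjCMac.cpp below).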
CGObjCRuntime::~CGObjCRuntime() {}
diff --git a/lib/CodeGen/CGObjCGNU.cpp b/lib/CodeGen/CGObjCGNU.cpp
index f0993c5..e85c898 100644
--- a/lib/CodeGen/CGObjCGNU.cpp
+++ b/lib/CodeGen/CGObjCGNU.cpp
@@ -969,7 +969,7 @@
ActualArgs.add(RValue::get(EnforceType(Builder, Receiver, IdTy)), ASTIdTy);
ActualArgs.add(RValue::get(cmd), CGF.getContext().getObjCSelType());
- ActualArgs.insert(ActualArgs.end(), CallArgs.begin(), CallArgs.end());
+ ActualArgs.addFrom(CallArgs);
CodeGenTypes &Types = CGM.getTypes();
const CGFunctionInfo &FnInfo = Types.getFunctionInfo(ResultType, ActualArgs,
@@ -1121,7 +1121,7 @@
CallArgList ActualArgs;
ActualArgs.add(RValue::get(Receiver), ASTIdTy);
ActualArgs.add(RValue::get(cmd), CGF.getContext().getObjCSelType());
- ActualArgs.insert(ActualArgs.end(), CallArgs.begin(), CallArgs.end());
+ ActualArgs.addFrom(CallArgs);
CodeGenTypes &Types = CGM.getTypes();
const CGFunctionInfo &FnInfo = Types.getFunctionInfo(ResultType, ActualArgs,
diff --git a/lib/CodeGen/CGObjCMac.cpp b/lib/CodeGen/CGObjCMac.cpp
index 8c3e9a3..988b16e 100644
--- a/lib/CodeGen/CGObjCMac.cpp
+++ b/lib/CodeGen/CGObjCMac.cpp
@@ -63,10 +63,13 @@
/// The default messenger, used for sends whose ABI is unchanged from
/// the all-integer/pointer case.
llvm::Constant *getMessageSendFn() const {
+ // Add the non-lazy-bind attribute, since objc_msgSend is likely to
+ // be called a lot.
const llvm::Type *params[] = { ObjectPtrTy, SelectorPtrTy };
return CGM.CreateRuntimeFunction(llvm::FunctionType::get(ObjectPtrTy,
params, true),
- "objc_msgSend");
+ "objc_msgSend",
+ llvm::Attribute::NonLazyBind);
}
/// void objc_msgSend_stret (id, SEL, ...)
@@ -887,6 +890,11 @@
llvm::Value *EmitClassRef(CGBuilderTy &Builder,
const ObjCInterfaceDecl *ID);
+ llvm::Value *EmitClassRefFromId(CGBuilderTy &Builder,
+ IdentifierInfo *II);
+
+ llvm::Value *EmitNSAutoreleasePoolClassRef(CGBuilderTy &Builder);
+
/// EmitSuperClassRef - Emits reference to class's main metadata class.
llvm::Value *EmitSuperClassRef(const ObjCInterfaceDecl *ID);
@@ -1158,6 +1166,11 @@
/// for the given class reference.
llvm::Value *EmitClassRef(CGBuilderTy &Builder,
const ObjCInterfaceDecl *ID);
+
+ llvm::Value *EmitClassRefFromId(CGBuilderTy &Builder,
+ IdentifierInfo *II);
+
+ llvm::Value *EmitNSAutoreleasePoolClassRef(CGBuilderTy &Builder);
/// EmitSuperClassRef - Return a Value*, of type ObjCTypes.ClassPtrTy,
/// for the given super class reference.
@@ -1526,7 +1539,7 @@
Arg0 = CGF.Builder.CreateBitCast(Arg0, ObjCTypes.ObjectPtrTy, "tmp");
ActualArgs.add(RValue::get(Arg0), Arg0Ty);
ActualArgs.add(RValue::get(Sel), CGF.getContext().getObjCSelType());
- ActualArgs.insert(ActualArgs.end(), CallArgs.begin(), CallArgs.end());
+ ActualArgs.addFrom(CallArgs);
CodeGenTypes &Types = CGM.getTypes();
const CGFunctionInfo &FnInfo = Types.getFunctionInfo(ResultType, ActualArgs,
@@ -1562,7 +1575,7 @@
if (FQT.isObjCGCStrong())
return Qualifiers::Strong;
- if (FQT.isObjCGCWeak())
+ if (FQT.isObjCGCWeak() || FQT.getObjCLifetime() == Qualifiers::OCL_Weak)
return Qualifiers::Weak;
if (FQT->isObjCObjectPointerType() || FQT->isBlockPointerType())
@@ -1579,7 +1592,8 @@
llvm::Constant *nullPtr =
llvm::Constant::getNullValue(llvm::Type::getInt8PtrTy(VMContext));
- if (CGM.getLangOptions().getGCMode() == LangOptions::NonGC)
+ if (CGM.getLangOptions().getGCMode() == LangOptions::NonGC &&
+ !CGM.getLangOptions().ObjCAutoRefCount)
return nullPtr;
bool hasUnion = false;
@@ -2129,7 +2143,7 @@
Interface->all_referenced_protocol_begin(),
Interface->all_referenced_protocol_end());
unsigned Flags = eClassFlags_Factory;
- if (ID->getNumIvarInitializers())
+ if (ID->hasCXXStructors())
Flags |= eClassFlags_HasCXXStructors;
unsigned Size =
CGM.getContext().getASTObjCImplementationLayout(ID).getSize().getQuantity();
@@ -3461,25 +3475,35 @@
return llvm::ConstantExpr::getBitCast(GV, ObjCTypes.SymtabPtrTy);
}
-llvm::Value *CGObjCMac::EmitClassRef(CGBuilderTy &Builder,
- const ObjCInterfaceDecl *ID) {
- LazySymbols.insert(ID->getIdentifier());
-
- llvm::GlobalVariable *&Entry = ClassReferences[ID->getIdentifier()];
-
+llvm::Value *CGObjCMac::EmitClassRefFromId(CGBuilderTy &Builder,
+ IdentifierInfo *II) {
+ LazySymbols.insert(II);
+
+ llvm::GlobalVariable *&Entry = ClassReferences[II];
+
if (!Entry) {
llvm::Constant *Casted =
- llvm::ConstantExpr::getBitCast(GetClassName(ID->getIdentifier()),
- ObjCTypes.ClassPtrTy);
+ llvm::ConstantExpr::getBitCast(GetClassName(II),
+ ObjCTypes.ClassPtrTy);
Entry =
- CreateMetadataVar("\01L_OBJC_CLASS_REFERENCES_", Casted,
- "__OBJC,__cls_refs,literal_pointers,no_dead_strip",
- 4, true);
+ CreateMetadataVar("\01L_OBJC_CLASS_REFERENCES_", Casted,
+ "__OBJC,__cls_refs,literal_pointers,no_dead_strip",
+ 4, true);
}
-
+
return Builder.CreateLoad(Entry, "tmp");
}
+llvm::Value *CGObjCMac::EmitClassRef(CGBuilderTy &Builder,
+ const ObjCInterfaceDecl *ID) {
+ return EmitClassRefFromId(Builder, ID->getIdentifier());
+}
+
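+// Emit a reference to the NSAutoreleasePool class, looked up purely by
+// name (no interface declaration is required); used by the MRR lowering
+// of @autoreleasepool.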
+llvm::Value *CGObjCMac::EmitNSAutoreleasePoolClassRef(CGBuilderTy &Builder) {
+ IdentifierInfo *II = &CGM.getContext().Idents.get("NSAutoreleasePool");
+ return EmitClassRefFromId(Builder, II);
+}
+
llvm::Value *CGObjCMac::EmitSelector(CGBuilderTy &Builder, Selector Sel,
bool lvalue) {
llvm::GlobalVariable *&Entry = SelectorReferences[Sel];
@@ -3567,12 +3591,18 @@
uint64_t MaxFieldOffset = 0;
uint64_t MaxSkippedFieldOffset = 0;
uint64_t LastBitfieldOrUnnamedOffset = 0;
+ uint64_t FirstFieldDelta = 0;
if (RecFields.empty())
return;
unsigned WordSizeInBits = CGM.getContext().Target.getPointerWidth(0);
unsigned ByteSizeInBits = CGM.getContext().Target.getCharWidth();
-
+ if (!RD && CGM.getLangOptions().ObjCAutoRefCount) {
+ FieldDecl *FirstField = RecFields[0];
+ FirstFieldDelta =
+ ComputeIvarBaseOffset(CGM, OI, cast<ObjCIvarDecl>(FirstField));
+ }
+
for (unsigned i = 0, e = RecFields.size(); i != e; ++i) {
FieldDecl *Field = RecFields[i];
uint64_t FieldOffset;
@@ -3580,9 +3610,10 @@
// Note that 'i' here is actually the field index inside RD of Field,
// although this dependency is hidden.
const ASTRecordLayout &RL = CGM.getContext().getASTRecordLayout(RD);
- FieldOffset = RL.getFieldOffset(i) / ByteSizeInBits;
+ FieldOffset = (RL.getFieldOffset(i) / ByteSizeInBits) - FirstFieldDelta;
} else
- FieldOffset = ComputeIvarBaseOffset(CGM, OI, cast<ObjCIvarDecl>(Field));
+ FieldOffset =
+ ComputeIvarBaseOffset(CGM, OI, cast<ObjCIvarDecl>(Field)) - FirstFieldDelta;
// Skip over unnamed or bitfields
if (!Field->getIdentifier() || Field->isBitField()) {
@@ -3861,12 +3892,16 @@
bool hasUnion = false;
const llvm::Type *PtrTy = llvm::Type::getInt8PtrTy(VMContext);
- if (CGM.getLangOptions().getGCMode() == LangOptions::NonGC)
+ if (CGM.getLangOptions().getGCMode() == LangOptions::NonGC &&
+ !CGM.getLangOptions().ObjCAutoRefCount)
return llvm::Constant::getNullValue(PtrTy);
llvm::SmallVector<ObjCIvarDecl*, 32> Ivars;
const ObjCInterfaceDecl *OI = OMD->getClassInterface();
- CGM.getContext().DeepCollectObjCIvars(OI, true, Ivars);
+ if (CGM.getLangOptions().ObjCAutoRefCount)
+ CGM.getContext().ShallowCollectObjCIvars(OI, Ivars);
+ else
+ CGM.getContext().DeepCollectObjCIvars(OI, true, Ivars);
llvm::SmallVector<FieldDecl*, 32> RecFields;
for (unsigned k = 0, e = Ivars.size(); k != e; ++k)
@@ -4743,7 +4778,12 @@
CLS_META = 0x1,
CLS_ROOT = 0x2,
OBJC2_CLS_HIDDEN = 0x10,
- CLS_EXCEPTION = 0x20
+ CLS_EXCEPTION = 0x20,
+
+ /// (Obsolete) ARC-specific: this class has a .release_ivars method.
+ CLS_HAS_IVAR_RELEASER = 0x40,
+ /// The class was compiled with -fobjc-arr.
+ CLS_COMPILED_BY_ARC = 0x80 // (1<<7)
};
/// BuildClassRoTInitializer - generate meta-data for:
/// struct _class_ro_t {
@@ -4767,6 +4807,10 @@
const ObjCImplementationDecl *ID) {
std::string ClassName = ID->getNameAsString();
std::vector<llvm::Constant*> Values(10); // 11 for 64bit targets!
+
+ if (CGM.getLangOptions().ObjCAutoRefCount)
+ flags |= CLS_COMPILED_BY_ARC;
+
Values[ 0] = llvm::ConstantInt::get(ObjCTypes.IntTy, flags);
Values[ 1] = llvm::ConstantInt::get(ObjCTypes.IntTy, InstanceStart);
Values[ 2] = llvm::ConstantInt::get(ObjCTypes.IntTy, InstanceSize);
@@ -4936,7 +4980,7 @@
ID->getClassInterface()->getVisibility() == HiddenVisibility;
if (classIsHidden)
flags |= OBJC2_CLS_HIDDEN;
- if (ID->getNumIvarInitializers())
+ if (ID->hasCXXStructors())
flags |= eClassFlags_ABI2_HasCXXStructors;
if (!ID->getClassInterface()->getSuperClass()) {
// class is root
@@ -4972,7 +5016,7 @@
flags = CLS;
if (classIsHidden)
flags |= OBJC2_CLS_HIDDEN;
- if (ID->getNumIvarInitializers())
+ if (ID->hasCXXStructors())
flags |= eClassFlags_ABI2_HasCXXStructors;
if (hasObjCExceptionAttribute(CGM.getContext(), ID->getClassInterface()))
@@ -5719,28 +5763,39 @@
return GV;
}
-llvm::Value *CGObjCNonFragileABIMac::EmitClassRef(CGBuilderTy &Builder,
- const ObjCInterfaceDecl *ID) {
- llvm::GlobalVariable *&Entry = ClassReferences[ID->getIdentifier()];
-
+llvm::Value *CGObjCNonFragileABIMac::EmitClassRefFromId(CGBuilderTy &Builder,
+ IdentifierInfo *II) {
+ llvm::GlobalVariable *&Entry = ClassReferences[II];
+
if (!Entry) {
- std::string ClassName(getClassSymbolPrefix() + ID->getNameAsString());
+ std::string ClassName(getClassSymbolPrefix() + II->getName().str());
llvm::GlobalVariable *ClassGV = GetClassGlobal(ClassName);
Entry =
- new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.ClassnfABIPtrTy,
- false, llvm::GlobalValue::InternalLinkage,
- ClassGV,
- "\01L_OBJC_CLASSLIST_REFERENCES_$_");
+ new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.ClassnfABIPtrTy,
+ false, llvm::GlobalValue::InternalLinkage,
+ ClassGV,
+ "\01L_OBJC_CLASSLIST_REFERENCES_$_");
Entry->setAlignment(
- CGM.getTargetData().getABITypeAlignment(
- ObjCTypes.ClassnfABIPtrTy));
+ CGM.getTargetData().getABITypeAlignment(
+ ObjCTypes.ClassnfABIPtrTy));
Entry->setSection("__DATA, __objc_classrefs, regular, no_dead_strip");
CGM.AddUsedGlobal(Entry);
}
-
+
return Builder.CreateLoad(Entry, "tmp");
}
+llvm::Value *CGObjCNonFragileABIMac::EmitClassRef(CGBuilderTy &Builder,
+ const ObjCInterfaceDecl *ID) {
+ return EmitClassRefFromId(Builder, ID->getIdentifier());
+}
+
+llvm::Value *CGObjCNonFragileABIMac::EmitNSAutoreleasePoolClassRef(
+ CGBuilderTy &Builder) {
+ IdentifierInfo *II = &CGM.getContext().Idents.get("NSAutoreleasePool");
+ return EmitClassRefFromId(Builder, II);
+}
+
llvm::Value *
CGObjCNonFragileABIMac::EmitSuperClassRef(CGBuilderTy &Builder,
const ObjCInterfaceDecl *ID) {
diff --git a/lib/CodeGen/CGObjCRuntime.h b/lib/CodeGen/CGObjCRuntime.h
index 866d5d8..7accc70 100644
--- a/lib/CodeGen/CGObjCRuntime.h
+++ b/lib/CodeGen/CGObjCRuntime.h
@@ -205,7 +205,13 @@
/// interface decl.
virtual llvm::Value *GetClass(CGBuilderTy &Builder,
const ObjCInterfaceDecl *OID) = 0;
-
+
+
+ virtual llvm::Value *EmitNSAutoreleasePoolClassRef(CGBuilderTy &Builder) {
+ assert(false && "autoreleasepool unsupported in this ABI");
+ return 0;
+ }
+
/// EnumerationMutationFunction - Return the function that's called by the
/// compiler when a mutation is detected during foreach iteration.
virtual llvm::Constant *EnumerationMutationFunction() = 0;
diff --git a/lib/CodeGen/CGStmt.cpp b/lib/CodeGen/CGStmt.cpp
index a982621..187d5a6 100644
--- a/lib/CodeGen/CGStmt.cpp
+++ b/lib/CodeGen/CGStmt.cpp
@@ -151,6 +151,9 @@
case Stmt::ObjCForCollectionStmtClass:
EmitObjCForCollectionStmt(cast<ObjCForCollectionStmt>(*S));
break;
+ case Stmt::ObjCAutoreleasePoolStmtClass:
+ EmitObjCAutoreleasePoolStmt(cast<ObjCAutoreleasePoolStmt>(*S));
+ break;
case Stmt::CXXTryStmtClass:
EmitCXXTryStmt(cast<CXXTryStmt>(*S));
@@ -764,7 +767,7 @@
} else if (RV->getType()->isAnyComplexType()) {
EmitComplexExprIntoAddr(RV, ReturnValue, false);
} else {
- EmitAggExpr(RV, AggValueSlot::forAddr(ReturnValue, false, true));
+ EmitAggExpr(RV, AggValueSlot::forAddr(ReturnValue, Qualifiers(), true));
}
EmitBranchThroughCleanup(ReturnBlock);
diff --git a/lib/CodeGen/CGValue.h b/lib/CodeGen/CGValue.h
index 7f77d55..1ad6628 100644
--- a/lib/CodeGen/CGValue.h
+++ b/lib/CodeGen/CGValue.h
@@ -101,8 +101,6 @@
/// bitfields, this is not a simple LLVM pointer, it may be a pointer plus a
/// bitrange.
class LValue {
- // FIXME: alignment?
-
enum {
Simple, // This is a normal l-value, use getAddress().
VectorElt, // This is a vector element l-value (V[i]), use getVector*
@@ -318,9 +316,11 @@
class AggValueSlot {
/// The address.
llvm::Value *Addr;
+
+ // The qualifiers of the slot; only 'volatile' and the Objective-C
+ // lifetime qualifiers are significant here.
+ Qualifiers Quals;
// Associated flags.
- bool VolatileFlag : 1;
bool LifetimeFlag : 1;
bool RequiresGCollection : 1;
@@ -335,25 +335,31 @@
static AggValueSlot ignored() {
AggValueSlot AV;
AV.Addr = 0;
- AV.VolatileFlag = AV.LifetimeFlag = AV.RequiresGCollection = AV.IsZeroed =0;
+ AV.Quals = Qualifiers();
+ AV.LifetimeFlag = AV.RequiresGCollection = AV.IsZeroed = 0;
return AV;
}
/// forAddr - Make a slot for an aggregate value.
///
- /// \param Volatile - true if the slot should be volatile-initialized
+ ///
+ /// \param Quals - The qualifiers that dictate how the slot
+ /// should be initialized. Only 'volatile' and the Objective-C
+ /// lifetime qualifiers matter.
+ ///
/// \param LifetimeExternallyManaged - true if the slot's lifetime
/// is being externally managed; false if a destructor should be
/// registered for any temporaries evaluated into the slot
/// \param RequiresGCollection - true if the slot is located
/// somewhere that ObjC GC calls should be emitted for
- static AggValueSlot forAddr(llvm::Value *Addr, bool Volatile,
+ static AggValueSlot forAddr(llvm::Value *Addr, Qualifiers Quals,
bool LifetimeExternallyManaged,
bool RequiresGCollection = false,
bool IsZeroed = false) {
AggValueSlot AV;
AV.Addr = Addr;
- AV.VolatileFlag = Volatile;
+ AV.Quals = Quals;
AV.LifetimeFlag = LifetimeExternallyManaged;
AV.RequiresGCollection = RequiresGCollection;
AV.IsZeroed = IsZeroed;
@@ -362,7 +368,7 @@
static AggValueSlot forLValue(LValue LV, bool LifetimeExternallyManaged,
bool RequiresGCollection = false) {
- return forAddr(LV.getAddress(), LV.isVolatileQualified(),
+ return forAddr(LV.getAddress(), LV.getQuals(),
LifetimeExternallyManaged, RequiresGCollection);
}
@@ -373,8 +379,14 @@
LifetimeFlag = Managed;
}
+ Qualifiers getQualifiers() const { return Quals; }
+
bool isVolatile() const {
- return VolatileFlag;
+ return Quals.hasVolatile();
+ }
+
+ Qualifiers::ObjCLifetime getObjCLifetime() const {
+ return Quals.getObjCLifetime();
}
bool requiresGCollection() const {
diff --git a/lib/CodeGen/CodeGenFunction.cpp b/lib/CodeGen/CodeGenFunction.cpp
index 150cb69..6ab4b76 100644
--- a/lib/CodeGen/CodeGenFunction.cpp
+++ b/lib/CodeGen/CodeGenFunction.cpp
@@ -31,7 +31,7 @@
CodeGenFunction::CodeGenFunction(CodeGenModule &cgm)
: CodeGenTypeCache(cgm), CGM(cgm),
Target(CGM.getContext().Target), Builder(cgm.getModule().getContext()),
- BlockInfo(0), BlockPointer(0),
+ AutoreleaseResult(false), BlockInfo(0), BlockPointer(0),
NormalCleanupDest(0), EHCleanupDest(0), NextCleanupDestIndex(1),
ExceptionSlot(0), EHSelectorSlot(0),
DebugInfo(0), DisableDebugInfo(false), DidCallStackSave(false),
@@ -142,6 +142,13 @@
assert(BreakContinueStack.empty() &&
"mismatched push/pop in break/continue stack!");
+ // Pop any cleanups that might have been associated with the
+ // parameters. Do this in whatever block we're currently in; it's
+ // important to do this before we enter the return block, or the
+ // return edges will be *really* confused.
+ if (EHStack.stable_begin() != PrologueCleanupDepth)
+ PopCleanupBlocks(PrologueCleanupDepth);
+
// Emit function epilog (to return).
EmitReturnBlock();
@@ -311,9 +318,19 @@
ReturnValue = CurFn->arg_begin();
} else {
ReturnValue = CreateIRTemp(RetTy, "retval");
+
+ // Tell the epilog emitter to autorelease the result. We do this
+ // now so that various specialized functions can suppress it
+ // during their IR-generation.
+ if (getLangOptions().ObjCAutoRefCount &&
+ !CurFnInfo->isReturnsRetained() &&
+ RetTy->isObjCRetainableType())
+ AutoreleaseResult = true;
}
EmitStartEHSpec(CurCodeDecl);
+
+ PrologueCleanupDepth = EHStack.stable_begin();
EmitFunctionProlog(*CurFnInfo, CurFn, Args);
if (D && isa<CXXMethodDecl>(D) && cast<CXXMethodDecl>(D)->isInstance())
diff --git a/lib/CodeGen/CodeGenFunction.h b/lib/CodeGen/CodeGenFunction.h
index bb8fd8e..f350228 100644
--- a/lib/CodeGen/CodeGenFunction.h
+++ b/lib/CodeGen/CodeGenFunction.h
@@ -18,6 +18,7 @@
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ExprObjC.h"
#include "clang/AST/CharUnits.h"
+#include "clang/Frontend/CodeGenOptions.h"
#include "clang/Basic/ABI.h"
#include "clang/Basic/TargetInfo.h"
#include "llvm/ADT/DenseMap.h"
@@ -63,6 +64,7 @@
class ObjCAtTryStmt;
class ObjCAtThrowStmt;
class ObjCAtSynchronizedStmt;
+ class ObjCAutoreleasePoolStmt;
namespace CodeGen {
class CodeGenTypes;
@@ -568,6 +570,10 @@
/// CurGD - The GlobalDecl for the current function being compiled.
GlobalDecl CurGD;
+ /// PrologueCleanupDepth - The cleanup depth enclosing all the
+ /// cleanups associated with the parameters.
+ EHScopeStack::stable_iterator PrologueCleanupDepth;
+
/// ReturnBlock - Unified return block.
JumpDest ReturnBlock;
@@ -584,6 +590,9 @@
bool CatchUndefined;
+ /// In ARC, whether we should autorelease the return value.
+ bool AutoreleaseResult;
+
const CodeGen::CGBlockInfo *BlockInfo;
llvm::Value *BlockPointer;
@@ -1048,6 +1057,9 @@
void disableDebugInfo() { DisableDebugInfo = true; }
void enableDebugInfo() { DisableDebugInfo = false; }
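+ /// At -O0, prefer the fused ARC entrypoints (e.g. objc_storeStrong):
+ /// with the optimizer off, nothing will pair up and eliminate the
+ /// discrete retain/release calls.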
+ bool shouldUseFusedARCCalls() {
+ return CGM.getCodeGenOpts().OptimizationLevel == 0;
+ }
const LangOptions &getLangOptions() const { return CGM.getLangOptions(); }
@@ -1345,7 +1357,8 @@
/// CreateAggTemp - Create a temporary memory object for the given
/// aggregate type.
AggValueSlot CreateAggTemp(QualType T, const llvm::Twine &Name = "tmp") {
- return AggValueSlot::forAddr(CreateMemTemp(T, Name), false, false);
+ return AggValueSlot::forAddr(CreateMemTemp(T, Name), T.getQualifiers(),
+ false);
}
/// Emit a cast to void* in the appropriate address space.
@@ -1379,12 +1392,11 @@
/// EmitAnyExprToMem - Emits the code necessary to evaluate an
/// arbitrary expression into the given memory location.
void EmitAnyExprToMem(const Expr *E, llvm::Value *Location,
- bool IsLocationVolatile,
- bool IsInitializer);
+ Qualifiers Quals, bool IsInitializer);
/// EmitExprAsInit - Emits the code necessary to initialize a
/// location in memory with the given initializer.
- void EmitExprAsInit(const Expr *init, const VarDecl *var,
+ void EmitExprAsInit(const Expr *init, const ValueDecl *D,
llvm::Value *loc, CharUnits alignment,
bool capturedByInit);
@@ -1584,6 +1596,10 @@
/// This function can be called with a null (unreachable) insert point.
void EmitVarDecl(const VarDecl &D);
+ void EmitScalarInit(const Expr *init, const ValueDecl *D,
+ llvm::Value *addr, bool capturedByInit,
+ bool isVolatile, unsigned alignment, QualType type);
+
typedef void SpecialInitFn(CodeGenFunction &Init, const VarDecl &D,
llvm::Value *Address);
@@ -1709,6 +1725,7 @@
void EmitObjCAtTryStmt(const ObjCAtTryStmt &S);
void EmitObjCAtThrowStmt(const ObjCAtThrowStmt &S);
void EmitObjCAtSynchronizedStmt(const ObjCAtSynchronizedStmt &S);
+ void EmitObjCAutoreleasePoolStmt(const ObjCAutoreleasePoolStmt &S);
llvm::Constant *getUnwindResumeFn();
llvm::Constant *getUnwindResumeOrRethrowFn();
@@ -1961,6 +1978,64 @@
RValue EmitObjCMessageExpr(const ObjCMessageExpr *E,
ReturnValueSlot Return = ReturnValueSlot());
+ /// Retrieves the default cleanup kind for an ARC cleanup.
+ /// Except under -fobjc-arc-eh, ARC cleanups are normal-only.
+ CleanupKind getARCCleanupKind() {
+ return CGM.getCodeGenOpts().ObjCAutoRefCountExceptions
+ ? NormalAndEHCleanup : NormalCleanup;
+ }
+
+ // ARC primitives.
+ void EmitARCInitWeak(llvm::Value *value, llvm::Value *addr);
+ void EmitARCDestroyWeak(llvm::Value *addr);
+ llvm::Value *EmitARCLoadWeak(llvm::Value *addr);
+ llvm::Value *EmitARCLoadWeakRetained(llvm::Value *addr);
+ llvm::Value *EmitARCStoreWeak(llvm::Value *value, llvm::Value *addr,
+ bool ignored);
+ void EmitARCCopyWeak(llvm::Value *dst, llvm::Value *src);
+ void EmitARCMoveWeak(llvm::Value *dst, llvm::Value *src);
+ llvm::Value *EmitARCRetainAutorelease(QualType type, llvm::Value *value);
+ llvm::Value *EmitARCRetainAutoreleaseNonBlock(llvm::Value *value);
+ llvm::Value *EmitARCStoreStrong(LValue addr, QualType type,
+ llvm::Value *value, bool ignored);
+ llvm::Value *EmitARCStoreStrongCall(llvm::Value *addr, llvm::Value *value,
+ bool ignored);
+ llvm::Value *EmitARCRetain(QualType type, llvm::Value *value);
+ llvm::Value *EmitARCRetainNonBlock(llvm::Value *value);
+ llvm::Value *EmitARCRetainBlock(llvm::Value *value);
+ void EmitARCRelease(llvm::Value *value, bool precise);
+ llvm::Value *EmitARCAutorelease(llvm::Value *value);
+ llvm::Value *EmitARCAutoreleaseReturnValue(llvm::Value *value);
+ llvm::Value *EmitARCRetainAutoreleaseReturnValue(llvm::Value *value);
+ llvm::Value *EmitARCRetainAutoreleasedReturnValue(llvm::Value *value);
+
+ std::pair<LValue,llvm::Value*>
+ EmitARCStoreAutoreleasing(const BinaryOperator *e);
+ std::pair<LValue,llvm::Value*>
+ EmitARCStoreStrong(const BinaryOperator *e, bool ignored);
+
+ llvm::Value *EmitObjCProduceObject(QualType T, llvm::Value *Ptr);
+ llvm::Value *EmitObjCConsumeObject(QualType T, llvm::Value *Ptr);
+ llvm::Value *EmitObjCExtendObjectLifetime(QualType T, llvm::Value *Ptr);
+
+ llvm::Value *EmitARCRetainScalarExpr(const Expr *expr);
+ llvm::Value *EmitARCRetainAutoreleaseScalarExpr(const Expr *expr);
+
+ void PushARCReleaseCleanup(CleanupKind kind, QualType type,
+ llvm::Value *addr, bool precise);
+ void PushARCWeakReleaseCleanup(CleanupKind kind, QualType type,
+ llvm::Value *addr);
+ void PushARCFieldReleaseCleanup(CleanupKind cleanupKind,
+ const FieldDecl *Field);
+ void PushARCFieldWeakReleaseCleanup(CleanupKind cleanupKind,
+ const FieldDecl *Field);
+
+ void EmitObjCAutoreleasePoolPop(llvm::Value *Ptr);
+ llvm::Value *EmitObjCAutoreleasePoolPush();
+ llvm::Value *EmitObjCMRRAutoreleasePoolPush();
+ void EmitObjCAutoreleasePoolCleanup(llvm::Value *Ptr);
+ void EmitObjCMRRAutoreleasePoolPop(llvm::Value *Ptr);
+
/// EmitReferenceBindingToExpr - Emits a reference binding to the passed in
/// expression. Will emit a temporary variable if E is not an LValue.
RValue EmitReferenceBindingToExpr(const Expr* E,
diff --git a/lib/CodeGen/CodeGenModule.cpp b/lib/CodeGen/CodeGenModule.cpp
index 7a1a968..78c57b4 100644
--- a/lib/CodeGen/CodeGenModule.cpp
+++ b/lib/CodeGen/CodeGenModule.cpp
@@ -64,7 +64,7 @@
ABI(createCXXABI(*this)),
Types(C, M, TD, getTargetCodeGenInfo().getABIInfo(), ABI),
TBAA(0),
- VTables(*this), Runtime(0), DebugInfo(0),
+ VTables(*this), Runtime(0), DebugInfo(0), ARCData(0), RRData(0),
CFConstantStringClassRef(0), ConstantStringClassRef(0),
VMContext(M.getContext()),
NSConcreteGlobalBlockDecl(0), NSConcreteStackBlockDecl(0),
@@ -88,6 +88,10 @@
Block.GlobalUniqueCount = 0;
+ if (C.getLangOptions().ObjCAutoRefCount)
+ ARCData = new ARCEntrypoints();
+ RRData = new RREntrypoints();
+
// Initialize the type cache.
llvm::LLVMContext &LLVMContext = M.getContext();
VoidTy = llvm::Type::getVoidTy(LLVMContext);
@@ -108,6 +112,8 @@
delete &ABI;
delete TBAA;
delete DebugInfo;
+ delete ARCData;
+ delete RRData;
}
void CodeGenModule::createObjCRuntime() {
@@ -830,7 +836,8 @@
llvm::Constant *
CodeGenModule::GetOrCreateLLVMFunction(llvm::StringRef MangledName,
const llvm::Type *Ty,
- GlobalDecl D, bool ForVTable) {
+ GlobalDecl D, bool ForVTable,
+ llvm::Attributes ExtraAttrs) {
// Lookup the entry, lazily creating it if necessary.
llvm::GlobalValue *Entry = GetGlobalValue(MangledName);
if (Entry) {
@@ -869,6 +876,8 @@
assert(F->getName() == MangledName && "name was uniqued!");
if (D.getDecl())
SetFunctionAttributes(D, F, IsIncompleteFunction);
+ if (ExtraAttrs != llvm::Attribute::None)
+ F->addFnAttr(ExtraAttrs);
// This is the first use or definition of a mangled name. If there is a
// deferred decl with this name, remember that we need to emit it at the end
@@ -937,8 +946,10 @@
/// type and name.
llvm::Constant *
CodeGenModule::CreateRuntimeFunction(const llvm::FunctionType *FTy,
- llvm::StringRef Name) {
- return GetOrCreateLLVMFunction(Name, FTy, GlobalDecl(), /*ForVTable=*/false);
+ llvm::StringRef Name,
+ llvm::Attributes ExtraAttrs) {
+ return GetOrCreateLLVMFunction(Name, FTy, GlobalDecl(), /*ForVTable=*/false,
+ ExtraAttrs);
}
static bool DeclIsConstantGlobal(ASTContext &Context, const VarDecl *D,
@@ -1997,6 +2008,7 @@
false, true, false, ObjCMethodDecl::Required);
D->addInstanceMethod(DTORMethod);
CodeGenFunction(*this).GenerateObjCCtorDtorMethod(D, DTORMethod, false);
+ D->setHasCXXStructors(true);
}
// If the implementation doesn't have any ivar initializers, we don't need
@@ -2015,6 +2027,7 @@
ObjCMethodDecl::Required);
D->addInstanceMethod(CTORMethod);
CodeGenFunction(*this).GenerateObjCCtorDtorMethod(D, CTORMethod, true);
+ D->setHasCXXStructors(true);
}
/// EmitNamespace - Emit all declarations in a namespace.
diff --git a/lib/CodeGen/CodeGenModule.h b/lib/CodeGen/CodeGenModule.h
index 09741df..7321ac4 100644
--- a/lib/CodeGen/CodeGenModule.h
+++ b/lib/CodeGen/CodeGenModule.h
@@ -131,6 +131,71 @@
/// The alignment of a pointer into the generic address space.
unsigned char PointerAlignInBytes;
};
+
+struct RREntrypoints {
+ RREntrypoints() { memset(this, 0, sizeof(*this)); }
+ /// void objc_autoreleasePoolPop(void*);
+ llvm::Constant *objc_autoreleasePoolPop;
+
+ /// void *objc_autoreleasePoolPush(void);
+ llvm::Constant *objc_autoreleasePoolPush;
+};
+
+struct ARCEntrypoints {
+ ARCEntrypoints() { memset(this, 0, sizeof(*this)); }
+
+ /// id objc_autorelease(id);
+ llvm::Constant *objc_autorelease;
+
+ /// id objc_autoreleaseReturnValue(id);
+ llvm::Constant *objc_autoreleaseReturnValue;
+
+ /// void objc_copyWeak(id *dest, id *src);
+ llvm::Constant *objc_copyWeak;
+
+ /// void objc_destroyWeak(id*);
+ llvm::Constant *objc_destroyWeak;
+
+ /// id objc_initWeak(id*, id);
+ llvm::Constant *objc_initWeak;
+
+ /// id objc_loadWeak(id*);
+ llvm::Constant *objc_loadWeak;
+
+ /// id objc_loadWeakRetained(id*);
+ llvm::Constant *objc_loadWeakRetained;
+
+ /// void objc_moveWeak(id *dest, id *src);
+ llvm::Constant *objc_moveWeak;
+
+ /// id objc_retain(id);
+ llvm::Constant *objc_retain;
+
+ /// id objc_retainAutorelease(id);
+ llvm::Constant *objc_retainAutorelease;
+
+ /// id objc_retainAutoreleaseReturnValue(id);
+ llvm::Constant *objc_retainAutoreleaseReturnValue;
+
+ /// id objc_retainAutoreleasedReturnValue(id);
+ llvm::Constant *objc_retainAutoreleasedReturnValue;
+
+ /// id objc_retainBlock(id);
+ llvm::Constant *objc_retainBlock;
+
+ /// void objc_release(id);
+ llvm::Constant *objc_release;
+
+ /// id objc_storeStrong(id*, id);
+ llvm::Constant *objc_storeStrong;
+
+ /// id objc_storeWeak(id*, id);
+ llvm::Constant *objc_storeWeak;
+
+ /// A void(void) inline asm used to mark that the return value of
+ /// a call will be immediately retained.
+ llvm::InlineAsm *retainAutoreleasedReturnValueMarker;
+};
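+
+// Note: these entrypoint caches start out null (via the memset in each
+// constructor) and are presumably filled in lazily, on first use, by the
+// ARC emission routines.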
/// CodeGenModule - This class organizes the cross-function state that is used
/// while generating LLVM code.
@@ -157,6 +222,8 @@
CGObjCRuntime* Runtime;
CGDebugInfo* DebugInfo;
+ ARCEntrypoints *ARCData;
+ RREntrypoints *RRData;
// WeakRefReferences - A set of references that have only been seen via
// a weakref so far. This is used to remove the weak of the reference if we ever
@@ -275,6 +342,16 @@
/// getCXXABI() - Return a reference to the configured C++ ABI.
CGCXXABI &getCXXABI() { return ABI; }
+ ARCEntrypoints &getARCEntrypoints() const {
+ assert(getLangOptions().ObjCAutoRefCount && ARCData != 0);
+ return *ARCData;
+ }
+
+ RREntrypoints &getRREntrypoints() const {
+ assert(RRData != 0);
+ return *RRData;
+ }
+
llvm::Value *getStaticLocalDeclAddress(const VarDecl *VD) {
return StaticLocalDeclMap[VD];
}
@@ -474,7 +551,7 @@
/// created).
llvm::Constant *GetAddrOfConstantCString(const std::string &str,
const char *GlobalName=0);
-
+
/// GetAddrOfCXXConstructor - Return the address of the constructor of the
/// given type.
llvm::GlobalValue *GetAddrOfCXXConstructor(const CXXConstructorDecl *ctor,
@@ -514,7 +591,9 @@
/// CreateRuntimeFunction - Create a new runtime function with the specified
/// type and name.
llvm::Constant *CreateRuntimeFunction(const llvm::FunctionType *Ty,
- llvm::StringRef Name);
+ llvm::StringRef Name,
+ llvm::Attributes ExtraAttrs =
+ llvm::Attribute::None);
/// CreateRuntimeVariable - Create a new runtime global variable with the
/// specified type and name.
llvm::Constant *CreateRuntimeVariable(const llvm::Type *Ty,
@@ -642,7 +721,9 @@
llvm::Constant *GetOrCreateLLVMFunction(llvm::StringRef MangledName,
const llvm::Type *Ty,
GlobalDecl D,
- bool ForVTable);
+ bool ForVTable,
+ llvm::Attributes ExtraAttrs =
+ llvm::Attribute::None);
llvm::Constant *GetOrCreateLLVMGlobal(llvm::StringRef MangledName,
const llvm::PointerType *PTy,
const VarDecl *D,
diff --git a/lib/CodeGen/ItaniumCXXABI.cpp b/lib/CodeGen/ItaniumCXXABI.cpp
index 12ef9bd..7e71df1 100644
--- a/lib/CodeGen/ItaniumCXXABI.cpp
+++ b/lib/CodeGen/ItaniumCXXABI.cpp
@@ -802,10 +802,27 @@
if (expr->doesUsualArrayDeleteWantSize())
return true;
+ // Automatic Reference Counting:
+ // We need an array cookie for pointers with strong or weak lifetime.
+ QualType AllocatedType = expr->getAllocatedType();
+ if (getContext().getLangOptions().ObjCAutoRefCount &&
+ AllocatedType->isObjCLifetimeType()) {
+ switch (AllocatedType.getObjCLifetime()) {
+ case Qualifiers::OCL_None:
+ case Qualifiers::OCL_ExplicitNone:
+ case Qualifiers::OCL_Autoreleasing:
+ return false;
+
+ case Qualifiers::OCL_Strong:
+ case Qualifiers::OCL_Weak:
+ return true;
+ }
+ }
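+
+ // For example: 'new __strong id[4]' must record the element count in a
+ // cookie so that delete[] can release each element, while an array of
+ // __unsafe_unretained pointers is trivially destructible.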
+
// Otherwise, if the class has a non-trivial destructor, it always
// needs a cookie.
const CXXRecordDecl *record =
- expr->getAllocatedType()->getBaseElementTypeUnsafe()->getAsCXXRecordDecl();
+ AllocatedType->getBaseElementTypeUnsafe()->getAsCXXRecordDecl();
return (record && !record->hasTrivialDestructor());
}
@@ -816,6 +833,22 @@
if (expr->doesUsualArrayDeleteWantSize())
return true;
+ // Automatic Reference Counting:
+ // We need an array cookie for pointers with strong or weak lifetime.
+ if (getContext().getLangOptions().ObjCAutoRefCount &&
+ elementType->isObjCLifetimeType()) {
+ switch (elementType.getObjCLifetime()) {
+ case Qualifiers::OCL_None:
+ case Qualifiers::OCL_ExplicitNone:
+ case Qualifiers::OCL_Autoreleasing:
+ return false;
+
+ case Qualifiers::OCL_Strong:
+ case Qualifiers::OCL_Weak:
+ return true;
+ }
+ }
+
// Otherwise, if the class has a non-trivial destructor, it always
// needs a cookie.
const CXXRecordDecl *record =
diff --git a/lib/CodeGen/TargetInfo.cpp b/lib/CodeGen/TargetInfo.cpp
index 043ead7..9a6cc20 100644
--- a/lib/CodeGen/TargetInfo.cpp
+++ b/lib/CodeGen/TargetInfo.cpp
@@ -2277,6 +2277,10 @@
return 13;
}
+ llvm::StringRef getARCRetainAutoreleasedReturnValueMarker() const {
+ return "mov\tr7, r7\t\t@ marker for objc_retainAutoreleaseReturnValue";
+ }
+
bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
llvm::Value *Address) const {
CodeGen::CGBuilderTy &Builder = CGF.Builder;
@@ -2290,8 +2294,6 @@
return false;
}
-
-
};
}
diff --git a/lib/CodeGen/TargetInfo.h b/lib/CodeGen/TargetInfo.h
index 4f59eb6..dbb09b1 100644
--- a/lib/CodeGen/TargetInfo.h
+++ b/lib/CodeGen/TargetInfo.h
@@ -111,6 +111,20 @@
const llvm::Type* Ty) const {
return Ty;
}
+
+ /// Retrieve the marker to emit immediately before calling
+ /// objc_retainAutoreleasedReturnValue, if any. The
+ /// implementation of objc_autoreleaseReturnValue sniffs the
+ /// instruction stream following its return address to decide
+ /// whether it's a call to objc_retainAutoreleasedReturnValue.
+ /// This can be prohibitively expensive, depending on the
+ /// relocation model, and so on some targets it instead sniffs for
+ /// a particular instruction sequence. This function returns
+ /// that instruction sequence in inline assembly, which will be
+ /// empty if none is required.
+ virtual llvm::StringRef getARCRetainAutoreleasedReturnValueMarker() const {
+ return "";
+ }
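+
+ // (See the ARM override in TargetInfo.cpp, which returns a benign
+ // 'mov r7, r7' marker for exactly this purpose.)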
};
}