During jump-scope checking, build the scope for an ExprWithCleanups
directly into the enclosing scope; this is a more accurate model,
although (I believe) the difference is unobservable in my test case
because of other, over-conservative rules. One of those rules now
carries an intentional exception: a block literal that appears in
the operand of a return statement can be trivially observed not to
extend its lifetime past the return, so we can allow a jump past it.
Do the necessary magic in IR generation to make this work.
git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@164589 91177308-0d34-0410-b5e6-96231b3b80d8
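
For illustration, a minimal sketch of the two cases (assuming ARC,
so a captured __strong id carries a non-trivial cleanup; 'use' is a
hypothetical helper, not part of this patch):

    // Now accepted: each block literal is the operand of a return
    // statement, so it trivially cannot outlive the return, and the
    // switch may jump past the literal in 'case 1' to reach 'case 0'.
    id (^now_ok(id obj, int which))(void) {
      switch (which) {
      case 1: return ^{ return obj; };
      case 0: return ^{ return obj; };
      }
      return (void *)0;
    }

    // Still rejected: the literal's full-expression is an ordinary
    // statement, so the conservative rule keeps treating its capture
    // copy as live to the end of the enclosing scope, and the goto
    // would jump into that protected region.
    extern void use(void (^)(void));
    void still_bad(id obj, int which) {
      if (which) goto done;  // error: jump enters the lifetime of
      use(^{ (void)obj; });  // the block's strong capture of 'obj'
    done:
      return;
    }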
diff --git a/lib/CodeGen/CGStmt.cpp b/lib/CodeGen/CGStmt.cpp
index 4fdbfd0..77414bf 100644
--- a/lib/CodeGen/CGStmt.cpp
+++ b/lib/CodeGen/CGStmt.cpp
@@ -743,6 +743,17 @@
   // Emit the result value, even if unused, to evaluate the side effects.
const Expr *RV = S.getRetValue();
+ // Treat block literals in a return expression as if they appeared
+ // in their own scope. This permits a small, easily-implemented
+ // exception to our over-conservative rules about not jumping to
+ // statements following block literals with non-trivial cleanups.
+ RunCleanupsScope cleanupScope(*this);
+ if (const ExprWithCleanups *cleanups =
+ dyn_cast_or_null<ExprWithCleanups>(RV)) {
+ enterFullExpression(cleanups);
+ RV = cleanups->getSubExpr();
+ }
+
// FIXME: Clean this up by using an LValue for ReturnTemp,
// EmitStoreThroughLValue, and EmitAnyExpr.
if (S.getNRVOCandidate() && S.getNRVOCandidate()->isNRVOVariable() &&
@@ -779,6 +790,7 @@
AggValueSlot::IsNotAliased));
}
+ cleanupScope.ForceCleanup();
EmitBranchThroughCleanup(ReturnBlock);
}
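
For reference, a sketch of the return path this produces (assuming
ARC; the numbered steps paraphrase the CHECK lines in the test added
below):

    id (^h(id obj))(void) {
      return ^{ return obj; };
      // With cleanupScope in place, the return emits roughly:
      //   1. enterFullExpression registers a cleanup for the
      //      block's capture copy of 'obj'
      //   2. the literal is built: objc_retain of 'obj' into the
      //      capture field
      //   3. objc_retainBlock copies the literal and the result is
      //      stored to the return slot
      //   4. ForceCleanup runs the objc_release of the capture copy
      //   5. EmitBranchThroughCleanup branches to the return block
    }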
diff --git a/lib/Sema/JumpDiagnostics.cpp b/lib/Sema/JumpDiagnostics.cpp
index ab786c6..a48779a 100644
--- a/lib/Sema/JumpDiagnostics.cpp
+++ b/lib/Sema/JumpDiagnostics.cpp
@@ -453,14 +453,19 @@
BuildScopeInformation(AS->getSubStmt(), (newParentScope = Scopes.size()-1));
continue;
}
-
- if (const BlockExpr *BE = dyn_cast<BlockExpr>(SubStmt)) {
- const BlockDecl *BDecl = BE->getBlockDecl();
+
+ // Disallow jumps past full-expressions that use blocks with
+ // non-trivial cleanups of their captures. This is theoretically
+ // implementable but a lot of work which we haven't felt up to doing.
+ if (ExprWithCleanups *EWC = dyn_cast<ExprWithCleanups>(SubStmt)) {
+ for (unsigned i = 0, e = EWC->getNumObjects(); i != e; ++i) {
+ const BlockDecl *BDecl = EWC->getObject(i);
for (BlockDecl::capture_const_iterator ci = BDecl->capture_begin(),
ce = BDecl->capture_end(); ci != ce; ++ci) {
VarDecl *variable = ci->getVariable();
BuildScopeInformation(variable, BDecl, ParentScope);
}
+ }
}
// Recursively walk the AST.
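
Because the new loop visits every cleanup object of the
ExprWithCleanups, every block literal in a single full-expression is
protected, not just the first one. A sketch (assuming ARC; 'pair' is
a hypothetical function):

    extern void pair(void (^)(void), void (^)(void));
    void two_blocks(id a, id b, int which) {
      if (which) goto skip;  // error: the jump enters the region
                             // protected by the captures of *both*
                             // literals in the call below
      pair(^{ (void)a; }, ^{ (void)b; });
    skip:
      return;
    }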
diff --git a/test/CodeGenCXX/exceptions.cpp b/test/CodeGenCXX/exceptions.cpp
index 8c20c9e..723e8d1 100644
--- a/test/CodeGenCXX/exceptions.cpp
+++ b/test/CodeGenCXX/exceptions.cpp
@@ -365,6 +365,7 @@
// CHECK-NEXT: invoke void @_ZN5test71BC1ERKNS_1AEPS0_(
// CHECK: store i1 false, i1* [[OUTER_NEW]]
// CHECK: phi
+ // CHECK-NEXT: store [[B]]*
// Destroy the inner A object.
// CHECK-NEXT: load i1* [[INNER_A]]
diff --git a/test/CodeGenObjC/arc-blocks.m b/test/CodeGenObjC/arc-blocks.m
index 2326bce..cbdb494 100644
--- a/test/CodeGenObjC/arc-blocks.m
+++ b/test/CodeGenObjC/arc-blocks.m
@@ -532,3 +532,60 @@
// CHECK-NEXT: [[SLOTREL:%.*]] = getelementptr inbounds [[BLOCK_T]]* [[BLOCK]], i32 0, i32 5
// CHECK-NEXT: store void ()* null, void ()** [[BLKVAR]], align 8
}
+
+// rdar://12151005
+//
+// This is an intentional exception to our conservative jump-scope
+// checking for full-expressions containing block literals with
+// non-trivial cleanups: if the block literal appears in the operand
+// of a return statement, there's no need to extend its lifetime.
+id (^test17(id self, int which))(void) {
+ switch (which) {
+ case 1: return ^{ return self; };
+ case 0: return ^{ return self; };
+ }
+ return (void*) 0;
+}
+// CHECK: define i8* ()* @test17(
+// CHECK: [[RET:%.*]] = alloca i8* ()*, align
+// CHECK-NEXT: [[SELF:%.*]] = alloca i8*,
+// CHECK: [[B0:%.*]] = alloca [[BLOCK:<.*>]], align
+// CHECK: [[B1:%.*]] = alloca [[BLOCK]], align
+// CHECK: [[T0:%.*]] = call i8* @objc_retain(i8*
+// CHECK-NEXT: store i8* [[T0]], i8** [[SELF]], align
+// CHECK-NOT: objc_retain
+// CHECK-NOT: objc_release
+// CHECK: [[DESTROY:%.*]] = getelementptr inbounds [[BLOCK]]* [[B0]], i32 0, i32 5
+// CHECK-NOT: objc_retain
+// CHECK-NOT: objc_release
+// CHECK: [[T0:%.*]] = getelementptr inbounds [[BLOCK]]* [[B0]], i32 0, i32 5
+// CHECK-NEXT: [[T1:%.*]] = load i8** [[SELF]], align
+// CHECK-NEXT: [[T2:%.*]] = call i8* @objc_retain(i8* [[T1]])
+// CHECK-NEXT: store i8* [[T2]], i8** [[T0]],
+// CHECK-NEXT: [[T0:%.*]] = bitcast [[BLOCK]]* [[B0]] to i8* ()*
+// CHECK-NEXT: [[T1:%.*]] = bitcast i8* ()* [[T0]] to i8*
+// CHECK-NEXT: [[T2:%.*]] = call i8* @objc_retainBlock(i8* [[T1]])
+// CHECK-NEXT: [[T3:%.*]] = bitcast i8* [[T2]] to i8* ()*
+// CHECK-NEXT: store i8* ()* [[T3]], i8* ()** [[RET]]
+// CHECK-NEXT: [[T0:%.*]] = load i8** [[DESTROY]]
+// CHECK-NEXT: call void @objc_release(i8* [[T0]])
+// CHECK-NEXT: store i32
+// CHECK-NEXT: br label
+// CHECK-NOT: objc_retain
+// CHECK-NOT: objc_release
+// CHECK: [[DESTROY:%.*]] = getelementptr inbounds [[BLOCK]]* [[B1]], i32 0, i32 5
+// CHECK-NOT: objc_retain
+// CHECK-NOT: objc_release
+// CHECK: [[T0:%.*]] = getelementptr inbounds [[BLOCK]]* [[B1]], i32 0, i32 5
+// CHECK-NEXT: [[T1:%.*]] = load i8** [[SELF]], align
+// CHECK-NEXT: [[T2:%.*]] = call i8* @objc_retain(i8* [[T1]])
+// CHECK-NEXT: store i8* [[T2]], i8** [[T0]],
+// CHECK-NEXT: [[T0:%.*]] = bitcast [[BLOCK]]* [[B1]] to i8* ()*
+// CHECK-NEXT: [[T1:%.*]] = bitcast i8* ()* [[T0]] to i8*
+// CHECK-NEXT: [[T2:%.*]] = call i8* @objc_retainBlock(i8* [[T1]])
+// CHECK-NEXT: [[T3:%.*]] = bitcast i8* [[T2]] to i8* ()*
+// CHECK-NEXT: store i8* ()* [[T3]], i8* ()** [[RET]]
+// CHECK-NEXT: [[T0:%.*]] = load i8** [[DESTROY]]
+// CHECK-NEXT: call void @objc_release(i8* [[T0]])
+// CHECK-NEXT: store i32
+// CHECK-NEXT: br label