Inline methods that throw.
Forked from https://android-review.googlesource.com/214292
test: 637-checker-throw-inline
bug: 30933338
Change-Id: I184be82dfab0710af3f3796e9e486c7817fa9c60
diff --git a/compiler/optimizing/inliner.cc b/compiler/optimizing/inliner.cc
index 2dd5fef..e012a42 100644
--- a/compiler/optimizing/inliner.cc
+++ b/compiler/optimizing/inliner.cc
@@ -69,17 +69,13 @@
// doing some logic in the runtime to discover if a method could have been inlined.
return;
}
- const ArenaVector<HBasicBlock*>& blocks = graph_->GetReversePostOrder();
+ // Keep a copy of all blocks when starting the visit.
+ ArenaVector<HBasicBlock*> blocks = graph_->GetReversePostOrder();
DCHECK(!blocks.empty());
- HBasicBlock* next_block = blocks[0];
- for (size_t i = 0; i < blocks.size(); ++i) {
- // Because we are changing the graph when inlining, we need to remember the next block.
- // This avoids doing the inlining work again on the inlined blocks.
- if (blocks[i] != next_block) {
- continue;
- }
- HBasicBlock* block = next_block;
- next_block = (i == blocks.size() - 1) ? nullptr : blocks[i + 1];
+ // Because we are changing the graph when inlining,
+ // we just iterate over the blocks of the outer method.
+ // This avoids doing the inlining work again on the inlined blocks.
+ for (HBasicBlock* block : blocks) {
for (HInstruction* instruction = block->GetFirstInstruction(); instruction != nullptr;) {
HInstruction* next = instruction->GetNext();
HInvoke* call = instruction->AsInvoke();
@@ -564,7 +560,6 @@
bb_cursor->InsertInstructionAfter(load_class, receiver_class);
load_class->SetLoadKind(kind);
- // TODO: Extend reference type propagation to understand the guard.
HNotEqual* compare = new (graph_->GetArena()) HNotEqual(load_class, receiver_class);
bb_cursor->InsertInstructionAfter(compare, load_class);
if (with_deoptimization) {
@@ -848,7 +843,6 @@
if (outermost_graph_->IsCompilingOsr()) {
CreateDiamondPatternForPolymorphicInline(compare, return_replacement, invoke_instruction);
} else {
- // TODO: Extend reference type propagation to understand the guard.
HDeoptimize* deoptimize = new (graph_->GetArena()) HDeoptimize(
compare, invoke_instruction->GetDexPc());
bb_cursor->InsertInstructionAfter(deoptimize, compare);
@@ -1354,9 +1348,6 @@
RunOptimizations(callee_graph, code_item, dex_compilation_unit);
number_of_instructions_budget += number_of_inlined_instructions;
- // TODO: We should abort only if all predecessors throw. However,
- // HGraph::InlineInto currently does not handle an exit block with
- // a throw predecessor.
HBasicBlock* exit_block = callee_graph->GetExitBlock();
if (exit_block == nullptr) {
VLOG(compiler) << "Method " << callee_dex_file.PrettyMethod(method_index)
@@ -1364,16 +1355,30 @@
return false;
}
- bool has_throw_predecessor = false;
+ bool has_one_return = false;
for (HBasicBlock* predecessor : exit_block->GetPredecessors()) {
if (predecessor->GetLastInstruction()->IsThrow()) {
- has_throw_predecessor = true;
- break;
+ if (invoke_instruction->GetBlock()->IsTryBlock()) {
+      // TODO(ngeoffray): Support adding HTryBoundary in HGraph::InlineInto.
+ VLOG(compiler) << "Method " << callee_dex_file.PrettyMethod(method_index)
+ << " could not be inlined because one branch always throws and"
+ << " caller is in a try/catch block";
+ return false;
+ } else if (graph_->GetExitBlock() == nullptr) {
+ // TODO(ngeoffray): Support adding HExit in the caller graph.
+ VLOG(compiler) << "Method " << callee_dex_file.PrettyMethod(method_index)
+ << " could not be inlined because one branch always throws and"
+ << " caller does not have an exit block";
+ return false;
+ }
+ } else {
+ has_one_return = true;
}
}
- if (has_throw_predecessor) {
+
+ if (!has_one_return) {
VLOG(compiler) << "Method " << callee_dex_file.PrettyMethod(method_index)
- << " could not be inlined because one branch always throws";
+ << " could not be inlined because it always throws";
return false;
}
diff --git a/compiler/optimizing/nodes.cc b/compiler/optimizing/nodes.cc
index abbb91a..71a26eb 100644
--- a/compiler/optimizing/nodes.cc
+++ b/compiler/optimizing/nodes.cc
@@ -2038,6 +2038,8 @@
HInstruction* return_value = nullptr;
if (GetBlocks().size() == 3) {
+ // Inliner already made sure we don't inline methods that always throw.
+ DCHECK(!GetBlocks()[1]->GetLastInstruction()->IsThrow());
// Simple case of an entry block, a body block, and an exit block.
// Put the body block's instruction into `invoke`'s block.
HBasicBlock* body = GetBlocks()[1];
@@ -2119,33 +2121,60 @@
UpdateLoopAndTryInformationOfNewBlock(to, at, /* replace_if_back_edge */ true);
// Update all predecessors of the exit block (now the `to` block)
- // to not `HReturn` but `HGoto` instead.
- bool returns_void = to->GetPredecessors()[0]->GetLastInstruction()->IsReturnVoid();
- if (to->GetPredecessors().size() == 1) {
- HBasicBlock* predecessor = to->GetPredecessors()[0];
+ // to not `HReturn` but `HGoto` instead. Special case throwing blocks
+ // to now get the outer graph exit block as successor. Note that the inliner
+ // currently doesn't support inlining methods with try/catch.
+ HPhi* return_value_phi = nullptr;
+ bool rerun_dominance = false;
+ bool rerun_loop_analysis = false;
+ for (size_t pred = 0; pred < to->GetPredecessors().size(); ++pred) {
+ HBasicBlock* predecessor = to->GetPredecessors()[pred];
HInstruction* last = predecessor->GetLastInstruction();
- if (!returns_void) {
- return_value = last->InputAt(0);
- }
- predecessor->AddInstruction(new (allocator) HGoto(last->GetDexPc()));
- predecessor->RemoveInstruction(last);
- } else {
- if (!returns_void) {
- // There will be multiple returns.
- return_value = new (allocator) HPhi(
- allocator, kNoRegNumber, 0, HPhi::ToPhiType(invoke->GetType()), to->GetDexPc());
- to->AddPhi(return_value->AsPhi());
- }
- for (HBasicBlock* predecessor : to->GetPredecessors()) {
- HInstruction* last = predecessor->GetLastInstruction();
- if (!returns_void) {
+ if (last->IsThrow()) {
+ DCHECK(!at->IsTryBlock());
+ predecessor->ReplaceSuccessor(to, outer_graph->GetExitBlock());
+ --pred;
+ // We need to re-run dominance information, as the exit block now has
+ // a new dominator.
+ rerun_dominance = true;
+ if (predecessor->GetLoopInformation() != nullptr) {
+      // The exit block and blocks post-dominated by the exit block do not belong
+      // to any loop. Because we do not compute the post-dominators, we need to re-run
+      // loop analysis to get the loop information correct.
+ rerun_loop_analysis = true;
+ }
+ } else {
+ if (last->IsReturnVoid()) {
+ DCHECK(return_value == nullptr);
+ DCHECK(return_value_phi == nullptr);
+ } else {
DCHECK(last->IsReturn());
- return_value->AsPhi()->AddInput(last->InputAt(0));
+ if (return_value_phi != nullptr) {
+ return_value_phi->AddInput(last->InputAt(0));
+ } else if (return_value == nullptr) {
+ return_value = last->InputAt(0);
+ } else {
+ // There will be multiple returns.
+ return_value_phi = new (allocator) HPhi(
+ allocator, kNoRegNumber, 0, HPhi::ToPhiType(invoke->GetType()), to->GetDexPc());
+ to->AddPhi(return_value_phi);
+ return_value_phi->AddInput(return_value);
+ return_value_phi->AddInput(last->InputAt(0));
+ return_value = return_value_phi;
+ }
}
predecessor->AddInstruction(new (allocator) HGoto(last->GetDexPc()));
predecessor->RemoveInstruction(last);
}
}
+ if (rerun_loop_analysis) {
+ outer_graph->ClearLoopInformation();
+ outer_graph->ClearDominanceInformation();
+ outer_graph->BuildDominatorTree();
+ } else if (rerun_dominance) {
+ outer_graph->ClearDominanceInformation();
+ outer_graph->ComputeDominanceInformation();
+ }
}
// Walk over the entry block and: