Step 1 of 2: conditional passes.

Rationale:
The change adds a return value to Run() in preparation for
conditional pass execution. The value returned by Run() is
best effort, returning false means no optimizations were
applied or no useful information was obtained. I filled
in a few cases with more exact information, others
still just return true. In addition, it integrates inlining
as a regular pass, avoiding the ugly "break" into
optimizations1 and optimizations2.

Bug: b/78171933, b/74026074

Test: test-art-host,target
Change-Id: Ia39c5c83c01dcd79841e4b623917d61c754cf075
diff --git a/compiler/optimizing/bounds_check_elimination.cc b/compiler/optimizing/bounds_check_elimination.cc
index d893cc8..dfefa52 100644
--- a/compiler/optimizing/bounds_check_elimination.cc
+++ b/compiler/optimizing/bounds_check_elimination.cc
@@ -1938,9 +1938,9 @@
   DISALLOW_COPY_AND_ASSIGN(BCEVisitor);
 };
 
-void BoundsCheckElimination::Run() {
+bool BoundsCheckElimination::Run() {
   if (!graph_->HasBoundsChecks()) {
-    return;
+    return false;
   }
 
   // Reverse post order guarantees a node's dominators are visited first.
@@ -1968,6 +1968,8 @@
 
   // Perform cleanup.
   visitor.Finish();
+
+  return true;
 }
 
 }  // namespace art
diff --git a/compiler/optimizing/bounds_check_elimination.h b/compiler/optimizing/bounds_check_elimination.h
index 79c67a8..92ab798 100644
--- a/compiler/optimizing/bounds_check_elimination.h
+++ b/compiler/optimizing/bounds_check_elimination.h
@@ -34,7 +34,7 @@
         side_effects_(side_effects),
         induction_analysis_(induction_analysis) {}
 
-  void Run() OVERRIDE;
+  bool Run() OVERRIDE;
 
   static constexpr const char* kBoundsCheckEliminationPassName = "BCE";
 
diff --git a/compiler/optimizing/cha_guard_optimization.cc b/compiler/optimizing/cha_guard_optimization.cc
index 3addaee..bdc395b 100644
--- a/compiler/optimizing/cha_guard_optimization.cc
+++ b/compiler/optimizing/cha_guard_optimization.cc
@@ -241,14 +241,15 @@
   GetGraph()->IncrementNumberOfCHAGuards();
 }
 
-void CHAGuardOptimization::Run() {
+bool CHAGuardOptimization::Run() {
   if (graph_->GetNumberOfCHAGuards() == 0) {
-    return;
+    return false;
   }
   CHAGuardVisitor visitor(graph_);
   for (HBasicBlock* block : graph_->GetReversePostOrder()) {
     visitor.VisitBasicBlock(block);
   }
+  return true;
 }
 
 }  // namespace art
diff --git a/compiler/optimizing/cha_guard_optimization.h b/compiler/optimizing/cha_guard_optimization.h
index f14e07b..d2c5a34 100644
--- a/compiler/optimizing/cha_guard_optimization.h
+++ b/compiler/optimizing/cha_guard_optimization.h
@@ -30,7 +30,7 @@
                                 const char* name = kCHAGuardOptimizationPassName)
       : HOptimization(graph, name) {}
 
-  void Run() OVERRIDE;
+  bool Run() OVERRIDE;
 
   static constexpr const char* kCHAGuardOptimizationPassName = "cha_guard_optimization";
 
diff --git a/compiler/optimizing/code_sinking.cc b/compiler/optimizing/code_sinking.cc
index 2e31d35..d6c9755 100644
--- a/compiler/optimizing/code_sinking.cc
+++ b/compiler/optimizing/code_sinking.cc
@@ -25,11 +25,11 @@
 
 namespace art {
 
-void CodeSinking::Run() {
+bool CodeSinking::Run() {
   HBasicBlock* exit = graph_->GetExitBlock();
   if (exit == nullptr) {
     // Infinite loop, just bail.
-    return;
+    return false;
   }
   // TODO(ngeoffray): we do not profile branches yet, so use throw instructions
   // as an indicator of an uncommon branch.
@@ -40,6 +40,7 @@
       SinkCodeToUncommonBranch(exit_predecessor);
     }
   }
+  return true;
 }
 
 static bool IsInterestingInstruction(HInstruction* instruction) {
diff --git a/compiler/optimizing/code_sinking.h b/compiler/optimizing/code_sinking.h
index 836d9d4..5db0b6d 100644
--- a/compiler/optimizing/code_sinking.h
+++ b/compiler/optimizing/code_sinking.h
@@ -33,7 +33,7 @@
               const char* name = kCodeSinkingPassName)
       : HOptimization(graph, name, stats) {}
 
-  void Run() OVERRIDE;
+  bool Run() OVERRIDE;
 
   static constexpr const char* kCodeSinkingPassName = "code_sinking";
 
diff --git a/compiler/optimizing/constant_folding.cc b/compiler/optimizing/constant_folding.cc
index 6f11e62..bb78c23 100644
--- a/compiler/optimizing/constant_folding.cc
+++ b/compiler/optimizing/constant_folding.cc
@@ -68,13 +68,14 @@
 };
 
 
-void HConstantFolding::Run() {
+bool HConstantFolding::Run() {
   HConstantFoldingVisitor visitor(graph_);
   // Process basic blocks in reverse post-order in the dominator tree,
   // so that an instruction turned into a constant, used as input of
   // another instruction, may possibly be used to turn that second
   // instruction into a constant as well.
   visitor.VisitReversePostOrder();
+  return true;
 }
 
 
diff --git a/compiler/optimizing/constant_folding.h b/compiler/optimizing/constant_folding.h
index 05c6df4..f4dbc80 100644
--- a/compiler/optimizing/constant_folding.h
+++ b/compiler/optimizing/constant_folding.h
@@ -41,7 +41,7 @@
  public:
   HConstantFolding(HGraph* graph, const char* name) : HOptimization(graph, name) {}
 
-  void Run() OVERRIDE;
+  bool Run() OVERRIDE;
 
   static constexpr const char* kConstantFoldingPassName = "constant_folding";
 
diff --git a/compiler/optimizing/constructor_fence_redundancy_elimination.cc b/compiler/optimizing/constructor_fence_redundancy_elimination.cc
index 4a66cd2..1a7f926 100644
--- a/compiler/optimizing/constructor_fence_redundancy_elimination.cc
+++ b/compiler/optimizing/constructor_fence_redundancy_elimination.cc
@@ -250,13 +250,14 @@
   DISALLOW_COPY_AND_ASSIGN(CFREVisitor);
 };
 
-void ConstructorFenceRedundancyElimination::Run() {
+bool ConstructorFenceRedundancyElimination::Run() {
   CFREVisitor cfre_visitor(graph_, stats_);
 
   // Arbitrarily visit in reverse-post order.
   // The exact block visit order does not matter, as the algorithm
   // only operates on a single block at a time.
   cfre_visitor.VisitReversePostOrder();
+  return true;
 }
 
 }  // namespace art
diff --git a/compiler/optimizing/constructor_fence_redundancy_elimination.h b/compiler/optimizing/constructor_fence_redundancy_elimination.h
index f4b06d5..367d9f2 100644
--- a/compiler/optimizing/constructor_fence_redundancy_elimination.h
+++ b/compiler/optimizing/constructor_fence_redundancy_elimination.h
@@ -52,7 +52,7 @@
                                         const char* name = kCFREPassName)
       : HOptimization(graph, name, stats) {}
 
-  void Run() OVERRIDE;
+  bool Run() OVERRIDE;
 
   static constexpr const char* kCFREPassName = "constructor_fence_redundancy_elimination";
 
diff --git a/compiler/optimizing/dead_code_elimination.cc b/compiler/optimizing/dead_code_elimination.cc
index 9fa0f72..1dc1094 100644
--- a/compiler/optimizing/dead_code_elimination.cc
+++ b/compiler/optimizing/dead_code_elimination.cc
@@ -508,7 +508,7 @@
   }
 }
 
-void HDeadCodeElimination::Run() {
+bool HDeadCodeElimination::Run() {
   // Do not eliminate dead blocks if the graph has irreducible loops. We could
   // support it, but that would require changes in our loop representation to handle
   // multiple entry points. We decided it was not worth the complexity.
@@ -526,6 +526,7 @@
   }
   SsaRedundantPhiElimination(graph_).Run();
   RemoveDeadInstructions();
+  return true;
 }
 
 }  // namespace art
diff --git a/compiler/optimizing/dead_code_elimination.h b/compiler/optimizing/dead_code_elimination.h
index 92a7f56..90caa53 100644
--- a/compiler/optimizing/dead_code_elimination.h
+++ b/compiler/optimizing/dead_code_elimination.h
@@ -32,7 +32,8 @@
   HDeadCodeElimination(HGraph* graph, OptimizingCompilerStats* stats, const char* name)
       : HOptimization(graph, name, stats) {}
 
-  void Run() OVERRIDE;
+  bool Run() OVERRIDE;
+
   static constexpr const char* kDeadCodeEliminationPassName = "dead_code_elimination";
 
  private:
diff --git a/compiler/optimizing/gvn.cc b/compiler/optimizing/gvn.cc
index f05159b..4863718 100644
--- a/compiler/optimizing/gvn.cc
+++ b/compiler/optimizing/gvn.cc
@@ -352,7 +352,7 @@
     visited_blocks_.ClearAllBits();
   }
 
-  void Run();
+  bool Run();
 
  private:
   // Per-block GVN. Will also update the ValueSet of the dominated and
@@ -397,7 +397,7 @@
   DISALLOW_COPY_AND_ASSIGN(GlobalValueNumberer);
 };
 
-void GlobalValueNumberer::Run() {
+bool GlobalValueNumberer::Run() {
   DCHECK(side_effects_.HasRun());
   sets_[graph_->GetEntryBlock()->GetBlockId()] = new (&allocator_) ValueSet(&allocator_);
 
@@ -406,6 +406,7 @@
   for (HBasicBlock* block : graph_->GetReversePostOrder()) {
     VisitBasicBlock(block);
   }
+  return true;
 }
 
 void GlobalValueNumberer::VisitBasicBlock(HBasicBlock* block) {
@@ -557,9 +558,9 @@
   return secondary_match;
 }
 
-void GVNOptimization::Run() {
+bool GVNOptimization::Run() {
   GlobalValueNumberer gvn(graph_, side_effects_);
-  gvn.Run();
+  return gvn.Run();
 }
 
 }  // namespace art
diff --git a/compiler/optimizing/gvn.h b/compiler/optimizing/gvn.h
index 4fdba26..75cfff2 100644
--- a/compiler/optimizing/gvn.h
+++ b/compiler/optimizing/gvn.h
@@ -31,7 +31,7 @@
                   const char* pass_name = kGlobalValueNumberingPassName)
       : HOptimization(graph, pass_name), side_effects_(side_effects) {}
 
-  void Run() OVERRIDE;
+  bool Run() OVERRIDE;
 
   static constexpr const char* kGlobalValueNumberingPassName = "GVN";
 
diff --git a/compiler/optimizing/induction_var_analysis.cc b/compiler/optimizing/induction_var_analysis.cc
index d270c6a..a4d638f 100644
--- a/compiler/optimizing/induction_var_analysis.cc
+++ b/compiler/optimizing/induction_var_analysis.cc
@@ -243,7 +243,7 @@
               graph->GetAllocator()->Adapter(kArenaAllocInductionVarAnalysis)) {
 }
 
-void HInductionVarAnalysis::Run() {
+bool HInductionVarAnalysis::Run() {
   // Detects sequence variables (generalized induction variables) during an outer to inner
   // traversal of all loops using Gerlek's algorithm. The order is important to enable
   // range analysis on outer loop while visiting inner loops.
@@ -253,6 +253,7 @@
       VisitLoop(graph_block->GetLoopInformation());
     }
   }
+  return !induction_.empty();
 }
 
 void HInductionVarAnalysis::VisitLoop(HLoopInformation* loop) {
diff --git a/compiler/optimizing/induction_var_analysis.h b/compiler/optimizing/induction_var_analysis.h
index acad77d..89fed2e 100644
--- a/compiler/optimizing/induction_var_analysis.h
+++ b/compiler/optimizing/induction_var_analysis.h
@@ -37,7 +37,7 @@
  public:
   explicit HInductionVarAnalysis(HGraph* graph, const char* name = kInductionPassName);
 
-  void Run() OVERRIDE;
+  bool Run() OVERRIDE;
 
   static constexpr const char* kInductionPassName = "induction_var_analysis";
 
diff --git a/compiler/optimizing/inliner.cc b/compiler/optimizing/inliner.cc
index 8b10a78..3800c96 100644
--- a/compiler/optimizing/inliner.cc
+++ b/compiler/optimizing/inliner.cc
@@ -124,13 +124,18 @@
   }
 }
 
-void HInliner::Run() {
-  if (graph_->IsDebuggable()) {
+bool HInliner::Run() {
+  if (compiler_driver_->GetCompilerOptions().GetInlineMaxCodeUnits() == 0) {
+    // Inlining effectively disabled.
+    return false;
+  } else if (graph_->IsDebuggable()) {
     // For simplicity, we currently never inline when the graph is debuggable. This avoids
     // doing some logic in the runtime to discover if a method could have been inlined.
-    return;
+    return false;
   }
 
+  bool didInline = false;
+
   // Initialize the number of instructions for the method being compiled. Recursive calls
   // to HInliner::Run have already updated the instruction count.
   if (outermost_graph_ == graph_) {
@@ -171,7 +176,9 @@
               call->GetDexMethodIndex(), /* with_signature */ false);
           // Tests prevent inlining by having $noinline$ in their method names.
           if (callee_name.find("$noinline$") == std::string::npos) {
-            if (!TryInline(call) && honor_inline_directives) {
+            if (TryInline(call)) {
+              didInline = true;
+            } else {
               bool should_have_inlined = (callee_name.find("$inline$") != std::string::npos);
               CHECK(!should_have_inlined) << "Could not inline " << callee_name;
             }
@@ -179,12 +186,16 @@
         } else {
           DCHECK(!honor_inline_directives);
           // Normal case: try to inline.
-          TryInline(call);
+          if (TryInline(call)) {
+            didInline = true;
+          }
         }
       }
       instruction = next;
     }
   }
+
+  return didInline;
 }
 
 static bool IsMethodOrDeclaringClassFinal(ArtMethod* method)
diff --git a/compiler/optimizing/inliner.h b/compiler/optimizing/inliner.h
index 02465d3..fb1c9af 100644
--- a/compiler/optimizing/inliner.h
+++ b/compiler/optimizing/inliner.h
@@ -60,7 +60,7 @@
         handles_(handles),
         inline_stats_(nullptr) {}
 
-  void Run() OVERRIDE;
+  bool Run() OVERRIDE;
 
   static constexpr const char* kInlinerPassName = "inliner";
 
diff --git a/compiler/optimizing/instruction_simplifier.cc b/compiler/optimizing/instruction_simplifier.cc
index d3cf956..0fe1672 100644
--- a/compiler/optimizing/instruction_simplifier.cc
+++ b/compiler/optimizing/instruction_simplifier.cc
@@ -42,7 +42,7 @@
         compiler_driver_(compiler_driver),
         stats_(stats) {}
 
-  void Run();
+  bool Run();
 
  private:
   void RecordSimplification() {
@@ -136,17 +136,18 @@
   static constexpr int kMaxSamePositionSimplifications = 50;
 };
 
-void InstructionSimplifier::Run() {
+bool InstructionSimplifier::Run() {
   if (kTestInstructionClonerExhaustively) {
     CloneAndReplaceInstructionVisitor visitor(graph_);
     visitor.VisitReversePostOrder();
   }
 
   InstructionSimplifierVisitor visitor(graph_, codegen_, compiler_driver_, stats_);
-  visitor.Run();
+  return visitor.Run();
 }
 
-void InstructionSimplifierVisitor::Run() {
+bool InstructionSimplifierVisitor::Run() {
+  bool didSimplify = false;
   // Iterate in reverse post order to open up more simplifications to users
   // of instructions that got simplified.
   for (HBasicBlock* block : GetGraph()->GetReversePostOrder()) {
@@ -156,10 +157,14 @@
     do {
       simplification_occurred_ = false;
       VisitBasicBlock(block);
+      if (simplification_occurred_) {
+        didSimplify = true;
+      }
     } while (simplification_occurred_ &&
              (simplifications_at_current_position_ < kMaxSamePositionSimplifications));
     simplifications_at_current_position_ = 0;
   }
+  return didSimplify;
 }
 
 namespace {
diff --git a/compiler/optimizing/instruction_simplifier.h b/compiler/optimizing/instruction_simplifier.h
index 5e20455..f409e87 100644
--- a/compiler/optimizing/instruction_simplifier.h
+++ b/compiler/optimizing/instruction_simplifier.h
@@ -49,7 +49,7 @@
 
   static constexpr const char* kInstructionSimplifierPassName = "instruction_simplifier";
 
-  void Run() OVERRIDE;
+  bool Run() OVERRIDE;
 
  private:
   CodeGenerator* codegen_;
diff --git a/compiler/optimizing/instruction_simplifier_arm.cc b/compiler/optimizing/instruction_simplifier_arm.cc
index 92081e3..37fcdb9 100644
--- a/compiler/optimizing/instruction_simplifier_arm.cc
+++ b/compiler/optimizing/instruction_simplifier_arm.cc
@@ -283,9 +283,10 @@
   }
 }
 
-void InstructionSimplifierArm::Run() {
+bool InstructionSimplifierArm::Run() {
   InstructionSimplifierArmVisitor visitor(graph_, stats_);
   visitor.VisitReversePostOrder();
+  return true;
 }
 
 }  // namespace arm
diff --git a/compiler/optimizing/instruction_simplifier_arm.h b/compiler/optimizing/instruction_simplifier_arm.h
index 2f65729..f1a16ef 100644
--- a/compiler/optimizing/instruction_simplifier_arm.h
+++ b/compiler/optimizing/instruction_simplifier_arm.h
@@ -30,7 +30,7 @@
 
   static constexpr const char* kInstructionSimplifierArmPassName = "instruction_simplifier_arm";
 
-  void Run() OVERRIDE;
+  bool Run() OVERRIDE;
 };
 
 }  // namespace arm
diff --git a/compiler/optimizing/instruction_simplifier_arm64.cc b/compiler/optimizing/instruction_simplifier_arm64.cc
index 1c44e5a..e0a6279 100644
--- a/compiler/optimizing/instruction_simplifier_arm64.cc
+++ b/compiler/optimizing/instruction_simplifier_arm64.cc
@@ -278,9 +278,10 @@
   }
 }
 
-void InstructionSimplifierArm64::Run() {
+bool InstructionSimplifierArm64::Run() {
   InstructionSimplifierArm64Visitor visitor(graph_, stats_);
   visitor.VisitReversePostOrder();
+  return true;
 }
 
 }  // namespace arm64
diff --git a/compiler/optimizing/instruction_simplifier_arm64.h b/compiler/optimizing/instruction_simplifier_arm64.h
index d180a8d..8659c1f 100644
--- a/compiler/optimizing/instruction_simplifier_arm64.h
+++ b/compiler/optimizing/instruction_simplifier_arm64.h
@@ -30,7 +30,7 @@
 
   static constexpr const char* kInstructionSimplifierArm64PassName = "instruction_simplifier_arm64";
 
-  void Run() OVERRIDE;
+  bool Run() OVERRIDE;
 };
 
 }  // namespace arm64
diff --git a/compiler/optimizing/instruction_simplifier_mips.cc b/compiler/optimizing/instruction_simplifier_mips.cc
index fa97401..3bdf90f 100644
--- a/compiler/optimizing/instruction_simplifier_mips.cc
+++ b/compiler/optimizing/instruction_simplifier_mips.cc
@@ -131,9 +131,10 @@
   }
 }
 
-void InstructionSimplifierMips::Run() {
+bool InstructionSimplifierMips::Run() {
   InstructionSimplifierMipsVisitor visitor(graph_, codegen_, stats_);
   visitor.VisitReversePostOrder();
+  return true;
 }
 
 }  // namespace mips
diff --git a/compiler/optimizing/instruction_simplifier_mips.h b/compiler/optimizing/instruction_simplifier_mips.h
index 6cb8aff..94ef73d 100644
--- a/compiler/optimizing/instruction_simplifier_mips.h
+++ b/compiler/optimizing/instruction_simplifier_mips.h
@@ -35,7 +35,7 @@
 
   static constexpr const char* kInstructionSimplifierMipsPassName = "instruction_simplifier_mips";
 
-  void Run() OVERRIDE;
+  bool Run() OVERRIDE;
 
  private:
   CodeGeneratorMIPS* codegen_;
diff --git a/compiler/optimizing/intrinsics.cc b/compiler/optimizing/intrinsics.cc
index f8dc316..dfe6d79 100644
--- a/compiler/optimizing/intrinsics.cc
+++ b/compiler/optimizing/intrinsics.cc
@@ -178,7 +178,8 @@
   return true;
 }
 
-void IntrinsicsRecognizer::Run() {
+bool IntrinsicsRecognizer::Run() {
+  bool didRecognize = false;
   ScopedObjectAccess soa(Thread::Current());
   for (HBasicBlock* block : graph_->GetReversePostOrder()) {
     for (HInstructionIterator inst_it(block->GetInstructions()); !inst_it.Done();
@@ -187,6 +188,7 @@
       if (inst->IsInvoke()) {
         bool wrong_invoke_type = false;
         if (Recognize(inst->AsInvoke(), /* art_method */ nullptr, &wrong_invoke_type)) {
+          didRecognize = true;
           MaybeRecordStat(stats_, MethodCompilationStat::kIntrinsicRecognized);
         } else if (wrong_invoke_type) {
           LOG(WARNING)
@@ -197,6 +199,7 @@
       }
     }
   }
+  return didRecognize;
 }
 
 std::ostream& operator<<(std::ostream& os, const Intrinsics& intrinsic) {
diff --git a/compiler/optimizing/intrinsics.h b/compiler/optimizing/intrinsics.h
index 1035cbc..30cffac 100644
--- a/compiler/optimizing/intrinsics.h
+++ b/compiler/optimizing/intrinsics.h
@@ -42,7 +42,7 @@
                        const char* name = kIntrinsicsRecognizerPassName)
       : HOptimization(graph, name, stats) {}
 
-  void Run() OVERRIDE;
+  bool Run() OVERRIDE;
 
   // Static helper that recognizes intrinsic call. Returns true on success.
   // If it fails due to invoke type mismatch, wrong_invoke_type is set.
diff --git a/compiler/optimizing/licm.cc b/compiler/optimizing/licm.cc
index d3a0376..0edb23b 100644
--- a/compiler/optimizing/licm.cc
+++ b/compiler/optimizing/licm.cc
@@ -78,7 +78,8 @@
   }
 }
 
-void LICM::Run() {
+bool LICM::Run() {
+  bool didLICM = false;
   DCHECK(side_effects_.HasRun());
 
   // Only used during debug.
@@ -157,6 +158,7 @@
           }
           instruction->MoveBefore(pre_header->GetLastInstruction());
           MaybeRecordStat(stats_, MethodCompilationStat::kLoopInvariantMoved);
+          didLICM = true;
         }
 
         if (!can_move && (instruction->CanThrow() || instruction->DoesAnyWrite())) {
@@ -167,6 +169,7 @@
       }
     }
   }
+  return didLICM;
 }
 
 }  // namespace art
diff --git a/compiler/optimizing/licm.h b/compiler/optimizing/licm.h
index ee567ae..f72d195 100644
--- a/compiler/optimizing/licm.h
+++ b/compiler/optimizing/licm.h
@@ -33,7 +33,7 @@
       : HOptimization(graph, name, stats),
         side_effects_(side_effects) {}
 
-  void Run() OVERRIDE;
+  bool Run() OVERRIDE;
 
   static constexpr const char* kLoopInvariantCodeMotionPassName = "licm";
 
diff --git a/compiler/optimizing/load_store_analysis.cc b/compiler/optimizing/load_store_analysis.cc
index 8b1812a..7d7bb94 100644
--- a/compiler/optimizing/load_store_analysis.cc
+++ b/compiler/optimizing/load_store_analysis.cc
@@ -152,7 +152,7 @@
   return true;
 }
 
-void LoadStoreAnalysis::Run() {
+bool LoadStoreAnalysis::Run() {
   for (HBasicBlock* block : graph_->GetReversePostOrder()) {
     heap_location_collector_.VisitBasicBlock(block);
   }
@@ -160,22 +160,23 @@
   if (heap_location_collector_.GetNumberOfHeapLocations() > kMaxNumberOfHeapLocations) {
     // Bail out if there are too many heap locations to deal with.
     heap_location_collector_.CleanUp();
-    return;
+    return false;
   }
   if (!heap_location_collector_.HasHeapStores()) {
     // Without heap stores, this pass would act mostly as GVN on heap accesses.
     heap_location_collector_.CleanUp();
-    return;
+    return false;
   }
   if (heap_location_collector_.HasVolatile() || heap_location_collector_.HasMonitorOps()) {
     // Don't do load/store elimination if the method has volatile field accesses or
     // monitor operations, for now.
     // TODO: do it right.
     heap_location_collector_.CleanUp();
-    return;
+    return false;
   }
 
   heap_location_collector_.BuildAliasingMatrix();
+  return true;
 }
 
 }  // namespace art
diff --git a/compiler/optimizing/load_store_analysis.h b/compiler/optimizing/load_store_analysis.h
index 437e6be..f84846d 100644
--- a/compiler/optimizing/load_store_analysis.h
+++ b/compiler/optimizing/load_store_analysis.h
@@ -572,7 +572,7 @@
     return heap_location_collector_;
   }
 
-  void Run() OVERRIDE;
+  bool Run() OVERRIDE;
 
   static constexpr const char* kLoadStoreAnalysisPassName = "load_store_analysis";
 
diff --git a/compiler/optimizing/load_store_elimination.cc b/compiler/optimizing/load_store_elimination.cc
index 237ecd3..d598ff5 100644
--- a/compiler/optimizing/load_store_elimination.cc
+++ b/compiler/optimizing/load_store_elimination.cc
@@ -948,22 +948,22 @@
   DISALLOW_COPY_AND_ASSIGN(LSEVisitor);
 };
 
-void LoadStoreElimination::Run() {
+bool LoadStoreElimination::Run() {
   if (graph_->IsDebuggable() || graph_->HasTryCatch()) {
     // Debugger may set heap values or trigger deoptimization of callers.
     // Try/catch support not implemented yet.
     // Skip this optimization.
-    return;
+    return false;
   }
   const HeapLocationCollector& heap_location_collector = lsa_.GetHeapLocationCollector();
   if (heap_location_collector.GetNumberOfHeapLocations() == 0) {
     // No HeapLocation information from LSA, skip this optimization.
-    return;
+    return false;
   }
 
   // TODO: analyze VecLoad/VecStore better.
   if (graph_->HasSIMD()) {
-    return;
+    return false;
   }
 
   LSEVisitor lse_visitor(graph_, heap_location_collector, side_effects_, stats_);
@@ -971,6 +971,7 @@
     lse_visitor.VisitBasicBlock(block);
   }
   lse_visitor.RemoveInstructions();
+  return true;
 }
 
 }  // namespace art
diff --git a/compiler/optimizing/load_store_elimination.h b/compiler/optimizing/load_store_elimination.h
index 7153541..408386b 100644
--- a/compiler/optimizing/load_store_elimination.h
+++ b/compiler/optimizing/load_store_elimination.h
@@ -35,7 +35,7 @@
         side_effects_(side_effects),
         lsa_(lsa) {}
 
-  void Run() OVERRIDE;
+  bool Run() OVERRIDE;
 
   static constexpr const char* kLoadStoreEliminationPassName = "load_store_elimination";
 
diff --git a/compiler/optimizing/loop_optimization.cc b/compiler/optimizing/loop_optimization.cc
index 1462404..7f1b319 100644
--- a/compiler/optimizing/loop_optimization.cc
+++ b/compiler/optimizing/loop_optimization.cc
@@ -608,11 +608,11 @@
                                                       global_allocator_)) {
 }
 
-void HLoopOptimization::Run() {
+bool HLoopOptimization::Run() {
   // Skip if there is no loop or the graph has try-catch/irreducible loops.
   // TODO: make this less of a sledgehammer.
   if (!graph_->HasLoops() || graph_->HasTryCatch() || graph_->HasIrreducibleLoops()) {
-    return;
+    return false;
   }
 
   // Phase-local allocator.
@@ -620,7 +620,7 @@
   loop_allocator_ = &allocator;
 
   // Perform loop optimizations.
-  LocalRun();
+  bool didLoopOpt = LocalRun();
   if (top_loop_ == nullptr) {
     graph_->SetHasLoops(false);  // no more loops
   }
@@ -628,13 +628,16 @@
   // Detach.
   loop_allocator_ = nullptr;
   last_loop_ = top_loop_ = nullptr;
+
+  return didLoopOpt;
 }
 
 //
 // Loop setup and traversal.
 //
 
-void HLoopOptimization::LocalRun() {
+bool HLoopOptimization::LocalRun() {
+  bool didLoopOpt = false;
   // Build the linear order using the phase-local allocator. This step enables building
   // a loop hierarchy that properly reflects the outer-inner and previous-next relation.
   ScopedArenaVector<HBasicBlock*> linear_order(loop_allocator_->Adapter(kArenaAllocLinearOrder));
@@ -666,7 +669,7 @@
     vector_map_ = &map;
     vector_permanent_map_ = &perm;
     // Traverse.
-    TraverseLoopsInnerToOuter(top_loop_);
+    didLoopOpt = TraverseLoopsInnerToOuter(top_loop_);
     // Detach.
     iset_ = nullptr;
     reductions_ = nullptr;
@@ -674,6 +677,7 @@
     vector_map_ = nullptr;
     vector_permanent_map_ = nullptr;
   }
+  return didLoopOpt;
 }
 
 void HLoopOptimization::AddLoop(HLoopInformation* loop_info) {
diff --git a/compiler/optimizing/loop_optimization.h b/compiler/optimizing/loop_optimization.h
index f9a31a3..11e9698 100644
--- a/compiler/optimizing/loop_optimization.h
+++ b/compiler/optimizing/loop_optimization.h
@@ -43,7 +43,7 @@
                     OptimizingCompilerStats* stats,
                     const char* name = kLoopOptimizationPassName);
 
-  void Run() OVERRIDE;
+  bool Run() OVERRIDE;
 
   static constexpr const char* kLoopOptimizationPassName = "loop_optimization";
 
@@ -123,7 +123,7 @@
   // Loop setup and traversal.
   //
 
-  void LocalRun();
+  bool LocalRun();
   void AddLoop(HLoopInformation* loop_info);
   void RemoveLoop(LoopNode* node);
 
diff --git a/compiler/optimizing/optimization.h b/compiler/optimizing/optimization.h
index c170f15..b00d686 100644
--- a/compiler/optimizing/optimization.h
+++ b/compiler/optimizing/optimization.h
@@ -47,8 +47,9 @@
   // 'instruction_simplifier$before_codegen'.
   const char* GetPassName() const { return pass_name_; }
 
-  // Perform the analysis itself.
-  virtual void Run() = 0;
+  // Perform the pass or analysis. Returns false if no optimizations occurred or no useful
+  // information was computed (this is best effort, returning true is always ok).
+  virtual bool Run() = 0;
 
  protected:
   HGraph* const graph_;
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index cadefc3..f68bcbe 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -294,7 +294,7 @@
       REQUIRES_SHARED(Locks::mutator_lock_);
 
  private:
-  void RunOptimizations(HGraph* graph,
+  bool RunOptimizations(HGraph* graph,
                         CodeGenerator* codegen,
                         const DexCompilationUnit& dex_compilation_unit,
                         PassObserver* pass_observer,
@@ -314,20 +314,22 @@
         handles);
     DCHECK_EQ(length, optimizations.size());
     // Run the optimization passes one by one.
+    bool change = false;
     for (size_t i = 0; i < length; ++i) {
       PassScope scope(optimizations[i]->GetPassName(), pass_observer);
-      optimizations[i]->Run();
+      change |= optimizations[i]->Run();
     }
+    return change;
   }
 
-  template <size_t length> void RunOptimizations(
+  template <size_t length> bool RunOptimizations(
       HGraph* graph,
       CodeGenerator* codegen,
       const DexCompilationUnit& dex_compilation_unit,
       PassObserver* pass_observer,
       VariableSizedHandleScope* handles,
       const OptimizationDef (&definitions)[length]) const {
-    RunOptimizations(
+    return RunOptimizations(
         graph, codegen, dex_compilation_unit, pass_observer, handles, definitions, length);
   }
 
@@ -366,13 +368,7 @@
                                      ArtMethod* method,
                                      VariableSizedHandleScope* handles) const;
 
-  void MaybeRunInliner(HGraph* graph,
-                       CodeGenerator* codegen,
-                       const DexCompilationUnit& dex_compilation_unit,
-                       PassObserver* pass_observer,
-                       VariableSizedHandleScope* handles) const;
-
-  void RunArchOptimizations(HGraph* graph,
+  bool RunArchOptimizations(HGraph* graph,
                             CodeGenerator* codegen,
                             const DexCompilationUnit& dex_compilation_unit,
                             PassObserver* pass_observer,
@@ -435,28 +431,7 @@
       || instruction_set == InstructionSet::kX86_64;
 }
 
-void OptimizingCompiler::MaybeRunInliner(HGraph* graph,
-                                         CodeGenerator* codegen,
-                                         const DexCompilationUnit& dex_compilation_unit,
-                                         PassObserver* pass_observer,
-                                         VariableSizedHandleScope* handles) const {
-  const CompilerOptions& compiler_options = GetCompilerDriver()->GetCompilerOptions();
-  bool should_inline = (compiler_options.GetInlineMaxCodeUnits() > 0);
-  if (!should_inline) {
-    return;
-  }
-  OptimizationDef optimizations[] = {
-    OptDef(OptimizationPass::kInliner)
-  };
-  RunOptimizations(graph,
-                   codegen,
-                   dex_compilation_unit,
-                   pass_observer,
-                   handles,
-                   optimizations);
-}
-
-void OptimizingCompiler::RunArchOptimizations(HGraph* graph,
+bool OptimizingCompiler::RunArchOptimizations(HGraph* graph,
                                               CodeGenerator* codegen,
                                               const DexCompilationUnit& dex_compilation_unit,
                                               PassObserver* pass_observer,
@@ -471,13 +446,12 @@
         OptDef(OptimizationPass::kGlobalValueNumbering, "GVN$after_arch"),
         OptDef(OptimizationPass::kScheduling)
       };
-      RunOptimizations(graph,
-                       codegen,
-                       dex_compilation_unit,
-                       pass_observer,
-                       handles,
-                       arm_optimizations);
-      break;
+      return RunOptimizations(graph,
+                              codegen,
+                              dex_compilation_unit,
+                              pass_observer,
+                              handles,
+                              arm_optimizations);
     }
 #endif
 #ifdef ART_ENABLE_CODEGEN_arm64
@@ -488,13 +462,12 @@
         OptDef(OptimizationPass::kGlobalValueNumbering, "GVN$after_arch"),
         OptDef(OptimizationPass::kScheduling)
       };
-      RunOptimizations(graph,
-                       codegen,
-                       dex_compilation_unit,
-                       pass_observer,
-                       handles,
-                       arm64_optimizations);
-      break;
+      return RunOptimizations(graph,
+                              codegen,
+                              dex_compilation_unit,
+                              pass_observer,
+                              handles,
+                              arm64_optimizations);
     }
 #endif
 #ifdef ART_ENABLE_CODEGEN_mips
@@ -505,13 +478,12 @@
         OptDef(OptimizationPass::kGlobalValueNumbering, "GVN$after_arch"),
         OptDef(OptimizationPass::kPcRelativeFixupsMips)
       };
-      RunOptimizations(graph,
-                       codegen,
-                       dex_compilation_unit,
-                       pass_observer,
-                       handles,
-                       mips_optimizations);
-      break;
+      return RunOptimizations(graph,
+                              codegen,
+                              dex_compilation_unit,
+                              pass_observer,
+                              handles,
+                              mips_optimizations);
     }
 #endif
 #ifdef ART_ENABLE_CODEGEN_mips64
@@ -520,13 +492,12 @@
         OptDef(OptimizationPass::kSideEffectsAnalysis),
         OptDef(OptimizationPass::kGlobalValueNumbering, "GVN$after_arch")
       };
-      RunOptimizations(graph,
-                       codegen,
-                       dex_compilation_unit,
-                       pass_observer,
-                       handles,
-                       mips64_optimizations);
-      break;
+      return RunOptimizations(graph,
+                              codegen,
+                              dex_compilation_unit,
+                              pass_observer,
+                              handles,
+                              mips64_optimizations);
     }
 #endif
 #ifdef ART_ENABLE_CODEGEN_x86
@@ -537,13 +508,12 @@
         OptDef(OptimizationPass::kPcRelativeFixupsX86),
         OptDef(OptimizationPass::kX86MemoryOperandGeneration)
       };
-      RunOptimizations(graph,
-                       codegen,
-                       dex_compilation_unit,
-                       pass_observer,
-                       handles,
-                       x86_optimizations);
-      break;
+      return RunOptimizations(graph,
+                              codegen,
+                              dex_compilation_unit,
+                              pass_observer,
+                              handles,
+                              x86_optimizations);
     }
 #endif
 #ifdef ART_ENABLE_CODEGEN_x86_64
@@ -553,17 +523,16 @@
         OptDef(OptimizationPass::kGlobalValueNumbering, "GVN$after_arch"),
         OptDef(OptimizationPass::kX86MemoryOperandGeneration)
       };
-      RunOptimizations(graph,
-                       codegen,
-                       dex_compilation_unit,
-                       pass_observer,
-                       handles,
-                       x86_64_optimizations);
-      break;
+      return RunOptimizations(graph,
+                              codegen,
+                              dex_compilation_unit,
+                              pass_observer,
+                              handles,
+                              x86_64_optimizations);
     }
 #endif
     default:
-      break;
+      return false;
   }
 }
 
@@ -626,23 +595,13 @@
     return;
   }
 
-  OptimizationDef optimizations1[] = {
+  OptimizationDef optimizations[] = {
     OptDef(OptimizationPass::kIntrinsicsRecognizer),
     OptDef(OptimizationPass::kSharpening),
     OptDef(OptimizationPass::kConstantFolding),
     OptDef(OptimizationPass::kInstructionSimplifier),
-    OptDef(OptimizationPass::kDeadCodeElimination, "dead_code_elimination$initial")
-  };
-  RunOptimizations(graph,
-                   codegen,
-                   dex_compilation_unit,
-                   pass_observer,
-                   handles,
-                   optimizations1);
-
-  MaybeRunInliner(graph, codegen, dex_compilation_unit, pass_observer, handles);
-
-  OptimizationDef optimizations2[] = {
+    OptDef(OptimizationPass::kDeadCodeElimination, "dead_code_elimination$initial"),
+    OptDef(OptimizationPass::kInliner),
     OptDef(OptimizationPass::kSideEffectsAnalysis,   "side_effects$before_gvn"),
     OptDef(OptimizationPass::kGlobalValueNumbering),
     OptDef(OptimizationPass::kSelectGenerator),
@@ -676,7 +635,7 @@
                    dex_compilation_unit,
                    pass_observer,
                    handles,
-                   optimizations2);
+                   optimizations);
 
   RunArchOptimizations(graph, codegen, dex_compilation_unit, pass_observer, handles);
 }
diff --git a/compiler/optimizing/pc_relative_fixups_mips.cc b/compiler/optimizing/pc_relative_fixups_mips.cc
index 0102254..f18ecc1 100644
--- a/compiler/optimizing/pc_relative_fixups_mips.cc
+++ b/compiler/optimizing/pc_relative_fixups_mips.cc
@@ -128,20 +128,21 @@
   HMipsComputeBaseMethodAddress* base_;
 };
 
-void PcRelativeFixups::Run() {
+bool PcRelativeFixups::Run() {
   CodeGeneratorMIPS* mips_codegen = down_cast<CodeGeneratorMIPS*>(codegen_);
   if (mips_codegen->GetInstructionSetFeatures().IsR6()) {
     // Do nothing for R6 because it has PC-relative addressing.
-    return;
+    return false;
   }
   if (graph_->HasIrreducibleLoops()) {
     // Do not run this optimization, as irreducible loops do not work with an instruction
     // that can be live-in at the irreducible loop header.
-    return;
+    return false;
   }
   PCRelativeHandlerVisitor visitor(graph_, codegen_);
   visitor.VisitInsertionOrder();
   visitor.MoveBaseIfNeeded();
+  return true;
 }
 
 }  // namespace mips
diff --git a/compiler/optimizing/pc_relative_fixups_mips.h b/compiler/optimizing/pc_relative_fixups_mips.h
index ec2c711..6dd1ee0 100644
--- a/compiler/optimizing/pc_relative_fixups_mips.h
+++ b/compiler/optimizing/pc_relative_fixups_mips.h
@@ -34,7 +34,7 @@
 
   static constexpr const char* kPcRelativeFixupsMipsPassName = "pc_relative_fixups_mips";
 
-  void Run() OVERRIDE;
+  bool Run() OVERRIDE;
 
  private:
   CodeGenerator* codegen_;
diff --git a/compiler/optimizing/pc_relative_fixups_x86.cc b/compiler/optimizing/pc_relative_fixups_x86.cc
index 647336b..9049457 100644
--- a/compiler/optimizing/pc_relative_fixups_x86.cc
+++ b/compiler/optimizing/pc_relative_fixups_x86.cc
@@ -256,10 +256,11 @@
   HX86ComputeBaseMethodAddress* base_;
 };
 
-void PcRelativeFixups::Run() {
+bool PcRelativeFixups::Run() {
   PCRelativeHandlerVisitor visitor(graph_, codegen_);
   visitor.VisitInsertionOrder();
   visitor.MoveBaseIfNeeded();
+  return true;
 }
 
 }  // namespace x86
diff --git a/compiler/optimizing/pc_relative_fixups_x86.h b/compiler/optimizing/pc_relative_fixups_x86.h
index 72fa71e..db56b7f 100644
--- a/compiler/optimizing/pc_relative_fixups_x86.h
+++ b/compiler/optimizing/pc_relative_fixups_x86.h
@@ -34,7 +34,7 @@
 
   static constexpr const char* kPcRelativeFixupsX86PassName  = "pc_relative_fixups_x86";
 
-  void Run() OVERRIDE;
+  bool Run() OVERRIDE;
 
  private:
   CodeGenerator* codegen_;
diff --git a/compiler/optimizing/reference_type_propagation.cc b/compiler/optimizing/reference_type_propagation.cc
index 4030883..c47c69a 100644
--- a/compiler/optimizing/reference_type_propagation.cc
+++ b/compiler/optimizing/reference_type_propagation.cc
@@ -348,7 +348,7 @@
   }
 }
 
-void ReferenceTypePropagation::Run() {
+bool ReferenceTypePropagation::Run() {
   RTPVisitor visitor(graph_, class_loader_, hint_dex_cache_, &handle_cache_, is_first_run_);
 
   // To properly propagate type info we need to visit in the dominator-based order.
@@ -360,6 +360,7 @@
 
   visitor.ProcessWorklist();
   ValidateTypes();
+  return true;
 }
 
 void ReferenceTypePropagation::RTPVisitor::VisitBasicBlock(HBasicBlock* block) {
diff --git a/compiler/optimizing/reference_type_propagation.h b/compiler/optimizing/reference_type_propagation.h
index fd4dad2..400852f 100644
--- a/compiler/optimizing/reference_type_propagation.h
+++ b/compiler/optimizing/reference_type_propagation.h
@@ -40,7 +40,7 @@
   // Visit a single instruction.
   void Visit(HInstruction* instruction);
 
-  void Run() OVERRIDE;
+  bool Run() OVERRIDE;
 
   // Returns true if klass is admissible to the propagation: non-null and resolved.
   // For an array type, we also check if the component type is admissible.
diff --git a/compiler/optimizing/scheduler.cc b/compiler/optimizing/scheduler.cc
index bca538f..e014efa 100644
--- a/compiler/optimizing/scheduler.cc
+++ b/compiler/optimizing/scheduler.cc
@@ -774,7 +774,7 @@
       instr->IsSuspendCheck();
 }
 
-void HInstructionScheduling::Run(bool only_optimize_loop_blocks,
+bool HInstructionScheduling::Run(bool only_optimize_loop_blocks,
                                  bool schedule_randomly) {
 #if defined(ART_ENABLE_CODEGEN_arm64) || defined(ART_ENABLE_CODEGEN_arm)
   // Phase-local allocator that allocates scheduler internal data structures like
@@ -814,6 +814,7 @@
     default:
       break;
   }
+  return true;
 }
 
 }  // namespace art
diff --git a/compiler/optimizing/scheduler.h b/compiler/optimizing/scheduler.h
index dfa077f..51cd20a 100644
--- a/compiler/optimizing/scheduler.h
+++ b/compiler/optimizing/scheduler.h
@@ -508,10 +508,11 @@
         codegen_(cg),
         instruction_set_(instruction_set) {}
 
-  void Run() {
-    Run(/*only_optimize_loop_blocks*/ true, /*schedule_randomly*/ false);
+  bool Run() OVERRIDE {
+    return Run(/*only_optimize_loop_blocks*/ true, /*schedule_randomly*/ false);
   }
-  void Run(bool only_optimize_loop_blocks, bool schedule_randomly);
+
+  bool Run(bool only_optimize_loop_blocks, bool schedule_randomly);
 
   static constexpr const char* kInstructionSchedulingPassName = "scheduler";
 
diff --git a/compiler/optimizing/select_generator.cc b/compiler/optimizing/select_generator.cc
index f9acf5a..0d0f7cc 100644
--- a/compiler/optimizing/select_generator.cc
+++ b/compiler/optimizing/select_generator.cc
@@ -90,7 +90,8 @@
   return select_phi;
 }
 
-void HSelectGenerator::Run() {
+bool HSelectGenerator::Run() {
+  bool didSelect = false;
   // Select cache with local allocator.
   ScopedArenaAllocator allocator(graph_->GetArenaStack());
   ScopedArenaSafeMap<HInstruction*, HSelect*> cache(
@@ -211,7 +212,9 @@
     // entry block. Any following blocks would have had the join block
     // as a dominator, and `MergeWith` handles changing that to the
     // entry block.
+    didSelect = true;
   }
+  return didSelect;
 }
 
 }  // namespace art
diff --git a/compiler/optimizing/select_generator.h b/compiler/optimizing/select_generator.h
index bda57fd..d24d226 100644
--- a/compiler/optimizing/select_generator.h
+++ b/compiler/optimizing/select_generator.h
@@ -68,7 +68,7 @@
                    OptimizingCompilerStats* stats,
                    const char* name = kSelectGeneratorPassName);
 
-  void Run() OVERRIDE;
+  bool Run() OVERRIDE;
 
   static constexpr const char* kSelectGeneratorPassName = "select_generator";
 
diff --git a/compiler/optimizing/sharpening.cc b/compiler/optimizing/sharpening.cc
index 70b4576..6541043 100644
--- a/compiler/optimizing/sharpening.cc
+++ b/compiler/optimizing/sharpening.cc
@@ -36,7 +36,7 @@
 
 namespace art {
 
-void HSharpening::Run() {
+bool HSharpening::Run() {
   // We don't care about the order of the blocks here.
   for (HBasicBlock* block : graph_->GetReversePostOrder()) {
     for (HInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) {
@@ -51,6 +51,7 @@
       //       because we know the type better when inlining.
     }
   }
+  return true;
 }
 
 static bool IsInBootImage(ArtMethod* method) {
diff --git a/compiler/optimizing/sharpening.h b/compiler/optimizing/sharpening.h
index fa3e948..9ccbcaf 100644
--- a/compiler/optimizing/sharpening.h
+++ b/compiler/optimizing/sharpening.h
@@ -40,7 +40,7 @@
         codegen_(codegen),
         compiler_driver_(compiler_driver) { }
 
-  void Run() OVERRIDE;
+  bool Run() OVERRIDE;
 
   static constexpr const char* kSharpeningPassName = "sharpening";
 
diff --git a/compiler/optimizing/side_effects_analysis.cc b/compiler/optimizing/side_effects_analysis.cc
index 6d82e8e..ba97b43 100644
--- a/compiler/optimizing/side_effects_analysis.cc
+++ b/compiler/optimizing/side_effects_analysis.cc
@@ -18,7 +18,7 @@
 
 namespace art {
 
-void SideEffectsAnalysis::Run() {
+bool SideEffectsAnalysis::Run() {
   // Inlining might have created more blocks, so we need to increase the size
   // if needed.
   block_effects_.resize(graph_->GetBlocks().size());
@@ -69,6 +69,7 @@
     }
   }
   has_run_ = true;
+  return true;
 }
 
 SideEffects SideEffectsAnalysis::GetLoopEffects(HBasicBlock* block) const {
diff --git a/compiler/optimizing/side_effects_analysis.h b/compiler/optimizing/side_effects_analysis.h
index c0f81a9..56a01e6 100644
--- a/compiler/optimizing/side_effects_analysis.h
+++ b/compiler/optimizing/side_effects_analysis.h
@@ -37,7 +37,7 @@
   SideEffects GetBlockEffects(HBasicBlock* block) const;
 
   // Compute side effects of individual blocks and loops.
-  void Run();
+  bool Run();
 
   bool HasRun() const { return has_run_; }
 
diff --git a/compiler/optimizing/ssa_phi_elimination.cc b/compiler/optimizing/ssa_phi_elimination.cc
index cb27ded..5370f43 100644
--- a/compiler/optimizing/ssa_phi_elimination.cc
+++ b/compiler/optimizing/ssa_phi_elimination.cc
@@ -23,9 +23,10 @@
 
 namespace art {
 
-void SsaDeadPhiElimination::Run() {
+bool SsaDeadPhiElimination::Run() {
   MarkDeadPhis();
   EliminateDeadPhis();
+  return true;
 }
 
 void SsaDeadPhiElimination::MarkDeadPhis() {
@@ -122,7 +123,7 @@
   }
 }
 
-void SsaRedundantPhiElimination::Run() {
+bool SsaRedundantPhiElimination::Run() {
   // Use local allocator for allocating memory used by this optimization.
   ScopedArenaAllocator allocator(graph_->GetArenaStack());
 
@@ -255,6 +256,7 @@
       current->GetBlock()->RemovePhi(current);
     }
   }
+  return true;
 }
 
 }  // namespace art
diff --git a/compiler/optimizing/ssa_phi_elimination.h b/compiler/optimizing/ssa_phi_elimination.h
index 11d5837..ee859e8 100644
--- a/compiler/optimizing/ssa_phi_elimination.h
+++ b/compiler/optimizing/ssa_phi_elimination.h
@@ -31,7 +31,7 @@
   explicit SsaDeadPhiElimination(HGraph* graph)
       : HOptimization(graph, kSsaDeadPhiEliminationPassName) {}
 
-  void Run() OVERRIDE;
+  bool Run() OVERRIDE;
 
   void MarkDeadPhis();
   void EliminateDeadPhis();
@@ -53,7 +53,7 @@
   explicit SsaRedundantPhiElimination(HGraph* graph)
       : HOptimization(graph, kSsaRedundantPhiEliminationPassName) {}
 
-  void Run() OVERRIDE;
+  bool Run() OVERRIDE;
 
   static constexpr const char* kSsaRedundantPhiEliminationPassName = "redundant_phi_elimination";
 
diff --git a/compiler/optimizing/x86_memory_gen.cc b/compiler/optimizing/x86_memory_gen.cc
index 0271850..f0069c0 100644
--- a/compiler/optimizing/x86_memory_gen.cc
+++ b/compiler/optimizing/x86_memory_gen.cc
@@ -76,9 +76,10 @@
       do_implicit_null_checks_(codegen->GetCompilerOptions().GetImplicitNullChecks()) {
 }
 
-void X86MemoryOperandGeneration::Run() {
+bool X86MemoryOperandGeneration::Run() {
   MemoryOperandVisitor visitor(graph_, do_implicit_null_checks_);
   visitor.VisitInsertionOrder();
+  return true;
 }
 
 }  // namespace x86
diff --git a/compiler/optimizing/x86_memory_gen.h b/compiler/optimizing/x86_memory_gen.h
index 5f15d9f..b254000 100644
--- a/compiler/optimizing/x86_memory_gen.h
+++ b/compiler/optimizing/x86_memory_gen.h
@@ -31,7 +31,7 @@
                              CodeGenerator* codegen,
                              OptimizingCompilerStats* stats);
 
-  void Run() OVERRIDE;
+  bool Run() OVERRIDE;
 
   static constexpr const char* kX86MemoryOperandGenerationPassName =
           "x86_memory_operand_generation";