Merge "Cleanup and improve stack map stream"
diff --git a/compiler/dex/quick/arm/int_arm.cc b/compiler/dex/quick/arm/int_arm.cc
index 8d20f1b..7598e50 100644
--- a/compiler/dex/quick/arm/int_arm.cc
+++ b/compiler/dex/quick/arm/int_arm.cc
@@ -1326,11 +1326,6 @@
     }
   }
 
-  // Now, restore lr to its non-temp status.
-  FreeTemp(tmp1);
-  Clobber(rs_rARM_LR);
-  UnmarkTemp(rs_rARM_LR);
-
   if (reg_status != 0) {
     // We had manually allocated registers for rl_result.
     // Now construct a RegLocation.
@@ -1338,7 +1333,14 @@
     rl_result.reg = RegStorage::MakeRegPair(res_lo, res_hi);
   }
 
+  // Free tmp1 but keep LR as temp for StoreValueWide() if needed.
+  FreeTemp(tmp1);
+
   StoreValueWide(rl_dest, rl_result);
+
+  // Now, restore lr to its non-temp status.
+  Clobber(rs_rARM_LR);
+  UnmarkTemp(rs_rARM_LR);
 }
 
 void ArmMir2Lir::GenArithOpLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
diff --git a/compiler/optimizing/boolean_simplifier.cc b/compiler/optimizing/boolean_simplifier.cc
index 06328f2..6ebfb45 100644
--- a/compiler/optimizing/boolean_simplifier.cc
+++ b/compiler/optimizing/boolean_simplifier.cc
@@ -72,8 +72,8 @@
       return graph->GetIntConstant(0);
     }
   } else {
-    // General case when 'cond' is another instruction of type boolean.
-    DCHECK_EQ(cond->GetType(), Primitive::Type::kPrimBoolean);
+    // General case when 'cond' is another instruction of type boolean,
+    // as verified by SSAChecker.
     return new (allocator) HBooleanNot(cond);
   }
 }
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index 38fa043..ae1fb53 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -3898,9 +3898,11 @@
   SlowPathCodeARM* slow_path = nullptr;
 
   // Return 0 if `obj` is null.
-  // TODO: avoid this check if we know obj is not null.
-  __ cmp(obj, ShifterOperand(0));
-  __ b(&zero, EQ);
+  // Avoid null check if we know obj is not null.
+  if (instruction->MustDoNullCheck()) {
+    __ cmp(obj, ShifterOperand(0));
+    __ b(&zero, EQ);
+  }
   // Compare the class of `obj` with `cls`.
   __ LoadFromOffset(kLoadWord, out, obj, class_offset);
   __ cmp(out, ShifterOperand(cls));
@@ -3919,8 +3921,12 @@
     __ LoadImmediate(out, 1);
     __ b(&done);
   }
-  __ Bind(&zero);
-  __ LoadImmediate(out, 0);
+
+  if (instruction->MustDoNullCheck() || instruction->IsClassFinal()) {
+    __ Bind(&zero);
+    __ LoadImmediate(out, 0);
+  }
+
   if (slow_path != nullptr) {
     __ Bind(slow_path->GetExitLabel());
   }
@@ -3946,9 +3952,11 @@
       instruction, locations->InAt(1), locations->GetTemp(0), instruction->GetDexPc());
   codegen_->AddSlowPath(slow_path);
 
-  // TODO: avoid this check if we know obj is not null.
-  __ cmp(obj, ShifterOperand(0));
-  __ b(slow_path->GetExitLabel(), EQ);
+  // Avoid null check if we know obj is not null.
+  if (instruction->MustDoNullCheck()) {
+    __ cmp(obj, ShifterOperand(0));
+    __ b(slow_path->GetExitLabel(), EQ);
+  }
   // Compare the class of `obj` with `cls`.
   __ LoadFromOffset(kLoadWord, temp, obj, class_offset);
   __ cmp(temp, ShifterOperand(cls));
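
For reference, a minimal standalone sketch (hypothetical names, not ART code) of what the emitted instanceof sequence computes, with must_do_null_check standing in for HInstanceOf::MustDoNullCheck() and class_is_final for IsClassFinal(); it shows why the null guard can be dropped once the input is known to be non-null.

// Hypothetical, simplified object model used only for this sketch.
struct Class { const Class* super = nullptr; };
struct Object { const Class* klass = nullptr; };

// Stand-in for the runtime slow path: walk the superclass chain (the real
// slow path also handles interfaces and arrays).
static bool SlowPathInstanceOf(const Object* obj, const Class* cls) {
  for (const Class* k = obj->klass; k != nullptr; k = k->super) {
    if (k == cls) return true;
  }
  return false;
}

bool InstanceOfFastPath(const Object* obj, const Class* cls,
                        bool must_do_null_check, bool class_is_final) {
  if (must_do_null_check && obj == nullptr) {
    return false;                       // The "zero" label: null is never an instance.
  }
  // When the guard is elided, the simplifier has proven obj cannot be null,
  // so this load is safe.
  if (obj->klass == cls) {
    return true;                        // Classes match.
  }
  if (class_is_final) {
    return false;                       // A final class has no subclasses: mismatch is definitive.
  }
  return SlowPathInstanceOf(obj, cls);  // Otherwise defer to the slow path.
}
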
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index 23ba339..1c6debd 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -1452,8 +1452,10 @@
       instruction, locations->InAt(1), LocationFrom(obj_cls), instruction->GetDexPc());
   codegen_->AddSlowPath(slow_path);
 
-  // TODO: avoid this check if we know obj is not null.
-  __ Cbz(obj, slow_path->GetExitLabel());
+  // Avoid null check if we know obj is not null.
+  if (instruction->MustDoNullCheck()) {
+    __ Cbz(obj, slow_path->GetExitLabel());
+  }
   // Compare the class of `obj` with `cls`.
   __ Ldr(obj_cls, HeapOperand(obj, mirror::Object::ClassOffset()));
   __ Cmp(obj_cls, cls);
@@ -1855,9 +1857,11 @@
   vixl::Label done;
 
   // Return 0 if `obj` is null.
-  // TODO: Avoid this check if we know `obj` is not null.
-  __ Mov(out, 0);
-  __ Cbz(obj, &done);
+  // Avoid null check if we know `obj` is not null.
+  if (instruction->MustDoNullCheck()) {
+    __ Mov(out, 0);
+    __ Cbz(obj, &done);
+  }
 
   // Compare the class of `obj` with `cls`.
   __ Ldr(out, HeapOperand(obj, mirror::Object::ClassOffset()));
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 3dcfca6..c604842 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -4250,9 +4250,11 @@
   SlowPathCodeX86* slow_path = nullptr;
 
   // Return 0 if `obj` is null.
-  // TODO: avoid this check if we know obj is not null.
-  __ testl(obj, obj);
-  __ j(kEqual, &zero);
+  // Avoid null check if we know obj is not null.
+  if (instruction->MustDoNullCheck()) {
+    __ testl(obj, obj);
+    __ j(kEqual, &zero);
+  }
   __ movl(out, Address(obj, class_offset));
   // Compare the class of `obj` with `cls`.
   if (cls.IsRegister()) {
@@ -4277,8 +4279,12 @@
     __ movl(out, Immediate(1));
     __ jmp(&done);
   }
-  __ Bind(&zero);
-  __ movl(out, Immediate(0));
+
+  if (instruction->MustDoNullCheck() || instruction->IsClassFinal()) {
+    __ Bind(&zero);
+    __ movl(out, Immediate(0));
+  }
+
   if (slow_path != nullptr) {
     __ Bind(slow_path->GetExitLabel());
   }
@@ -4303,11 +4309,13 @@
       instruction, locations->InAt(1), locations->GetTemp(0), instruction->GetDexPc());
   codegen_->AddSlowPath(slow_path);
 
-  // TODO: avoid this check if we know obj is not null.
-  __ testl(obj, obj);
-  __ j(kEqual, slow_path->GetExitLabel());
-  __ movl(temp, Address(obj, class_offset));
+  // Avoid null check if we know obj is not null.
+  if (instruction->MustDoNullCheck()) {
+    __ testl(obj, obj);
+    __ j(kEqual, slow_path->GetExitLabel());
+  }
 
+  __ movl(temp, Address(obj, class_offset));
   // Compare the class of `obj` with `cls`.
   if (cls.IsRegister()) {
     __ cmpl(temp, cls.AsRegister<Register>());
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index b404f8d..47425fb 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -4181,9 +4181,11 @@
   SlowPathCodeX86_64* slow_path = nullptr;
 
   // Return 0 if `obj` is null.
-  // TODO: avoid this check if we know obj is not null.
-  __ testl(obj, obj);
-  __ j(kEqual, &zero);
+  // Avoid null check if we know obj is not null.
+  if (instruction->MustDoNullCheck()) {
+    __ testl(obj, obj);
+    __ j(kEqual, &zero);
+  }
   // Compare the class of `obj` with `cls`.
   __ movl(out, Address(obj, class_offset));
   if (cls.IsRegister()) {
@@ -4207,8 +4209,12 @@
     __ movl(out, Immediate(1));
     __ jmp(&done);
   }
-  __ Bind(&zero);
-  __ movl(out, Immediate(0));
+
+  if (instruction->MustDoNullCheck() || instruction->IsClassFinal()) {
+    __ Bind(&zero);
+    __ movl(out, Immediate(0));
+  }
+
   if (slow_path != nullptr) {
     __ Bind(slow_path->GetExitLabel());
   }
@@ -4233,9 +4239,11 @@
       instruction, locations->InAt(1), locations->GetTemp(0), instruction->GetDexPc());
   codegen_->AddSlowPath(slow_path);
 
-  // TODO: avoid this check if we know obj is not null.
-  __ testl(obj, obj);
-  __ j(kEqual, slow_path->GetExitLabel());
+  // Avoid null check if we know obj is not null.
+  if (instruction->MustDoNullCheck()) {
+    __ testl(obj, obj);
+    __ j(kEqual, slow_path->GetExitLabel());
+  }
   // Compare the class of `obj` with `cls`.
   __ movl(temp, Address(obj, class_offset));
   if (cls.IsRegister()) {
diff --git a/compiler/optimizing/instruction_simplifier.cc b/compiler/optimizing/instruction_simplifier.cc
index 225af77..2df7c16 100644
--- a/compiler/optimizing/instruction_simplifier.cc
+++ b/compiler/optimizing/instruction_simplifier.cc
@@ -62,6 +62,7 @@
   void VisitSub(HSub* instruction) OVERRIDE;
   void VisitUShr(HUShr* instruction) OVERRIDE;
   void VisitXor(HXor* instruction) OVERRIDE;
+  void VisitInstanceOf(HInstanceOf* instruction) OVERRIDE;
 
   OptimizingCompilerStats* stats_;
   bool simplification_occurred_ = false;
@@ -159,6 +160,10 @@
 
 void InstructionSimplifierVisitor::VisitCheckCast(HCheckCast* check_cast) {
   HLoadClass* load_class = check_cast->InputAt(1)->AsLoadClass();
+  if (!check_cast->InputAt(0)->CanBeNull()) {
+    check_cast->ClearMustDoNullCheck();
+  }
+
   if (!load_class->IsResolved()) {
    // If the class couldn't be resolved it's not safe to compare against it. Its
    // default type would be Top which might be wider than the actual class type
@@ -176,6 +181,12 @@
   }
 }
 
+void InstructionSimplifierVisitor::VisitInstanceOf(HInstanceOf* instruction) {
+  if (!instruction->InputAt(0)->CanBeNull()) {
+    instruction->ClearMustDoNullCheck();
+  }
+}
+
 void InstructionSimplifierVisitor::VisitSuspendCheck(HSuspendCheck* check) {
   HBasicBlock* block = check->GetBlock();
   // Currently always keep the suspend check at entry.
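
The contract behind these two visitors: the flag is cleared only when the checked input provably cannot be null (for instance the result of a new-instance or of an explicit null check), so the code generators above may skip the guard without changing observable behaviour. A small sketch of that contract with made-up types:

// Made-up model of a type-check node carrying the flag added in nodes.h.
struct CheckedInput {
  bool can_be_null;  // Plays the role of HInstruction::CanBeNull().
};

struct TypeCheckNode {
  CheckedInput* input;
  bool must_do_null_check = true;

  void ClearMustDoNullCheck() { must_do_null_check = false; }
};

// Mirrors VisitInstanceOf()/VisitCheckCast(): clear the flag only on proof.
void SimplifyTypeCheck(TypeCheckNode* node) {
  if (!node->input->can_be_null) {
    node->ClearMustDoNullCheck();
  }
}
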
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 19227ca..b89487f 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -3355,6 +3355,7 @@
               uint32_t dex_pc)
       : HExpression(Primitive::kPrimBoolean, SideEffects::None()),
         class_is_final_(class_is_final),
+        must_do_null_check_(true),
         dex_pc_(dex_pc) {
     SetRawInputAt(0, object);
     SetRawInputAt(1, constant);
@@ -3374,10 +3375,15 @@
 
   bool IsClassFinal() const { return class_is_final_; }
 
+  // Used only in code generation.
+  bool MustDoNullCheck() const { return must_do_null_check_; }
+  void ClearMustDoNullCheck() { must_do_null_check_ = false; }
+
   DECLARE_INSTRUCTION(InstanceOf);
 
  private:
   const bool class_is_final_;
+  bool must_do_null_check_;
   const uint32_t dex_pc_;
 
   DISALLOW_COPY_AND_ASSIGN(HInstanceOf);
@@ -3418,6 +3424,7 @@
              uint32_t dex_pc)
       : HTemplateInstruction(SideEffects::None()),
         class_is_final_(class_is_final),
+        must_do_null_check_(true),
         dex_pc_(dex_pc) {
     SetRawInputAt(0, object);
     SetRawInputAt(1, constant);
@@ -3436,6 +3443,9 @@
 
   bool CanThrow() const OVERRIDE { return true; }
 
+  bool MustDoNullCheck() const { return must_do_null_check_; }
+  void ClearMustDoNullCheck() { must_do_null_check_ = false; }
+
   uint32_t GetDexPc() const { return dex_pc_; }
 
   bool IsClassFinal() const { return class_is_final_; }
@@ -3444,6 +3454,7 @@
 
  private:
   const bool class_is_final_;
+  bool must_do_null_check_;
   const uint32_t dex_pc_;
 
   DISALLOW_COPY_AND_ASSIGN(HCheckCast);
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index 218894f..d99d359 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -324,7 +324,7 @@
   HDeadCodeElimination dce2(graph, stats, "dead_code_elimination_final");
   HConstantFolding fold1(graph);
   InstructionSimplifier simplify1(graph, stats);
-  HBooleanSimplifier boolean_not(graph);
+  HBooleanSimplifier boolean_simplify(graph);
 
   HInliner inliner(graph, dex_compilation_unit, dex_compilation_unit, driver, stats);
 
@@ -343,10 +343,10 @@
     &dce1,
     &fold1,
     &simplify1,
+    &inliner,
     // BooleanSimplifier depends on the InstructionSimplifier removing redundant
     // suspend checks to recognize empty blocks.
-    &boolean_not,
-    &inliner,
+    &boolean_simplify,
     &fold2,
     &side_effects,
     &gvn,
diff --git a/compiler/optimizing/register_allocator.cc b/compiler/optimizing/register_allocator.cc
index f8e00f6..0fdf051 100644
--- a/compiler/optimizing/register_allocator.cc
+++ b/compiler/optimizing/register_allocator.cc
@@ -378,7 +378,7 @@
     // Split just before first register use.
     size_t first_register_use = current->FirstRegisterUse();
     if (first_register_use != kNoLifetime) {
-      LiveInterval* split = Split(current, first_register_use - 1);
+      LiveInterval* split = SplitBetween(current, current->GetStart(), first_register_use - 1);
       // Don't add directly to `unhandled`, it needs to be sorted and the start
       // of this new interval might be after intervals already in the list.
       AddSorted(&unhandled, split);
@@ -997,7 +997,7 @@
       // If the first use of that instruction is after the last use of the found
       // register, we split this interval just before its first register use.
       AllocateSpillSlotFor(current);
-      LiveInterval* split = Split(current, first_register_use - 1);
+      LiveInterval* split = SplitBetween(current, current->GetStart(), first_register_use - 1);
       if (current == split) {
         DumpInterval(std::cerr, current);
         DumpAllIntervals(std::cerr);
@@ -1100,6 +1100,31 @@
   }
 }
 
+LiveInterval* RegisterAllocator::SplitBetween(LiveInterval* interval, size_t from, size_t to) {
+  HBasicBlock* block_from = liveness_.GetBlockFromPosition(from);
+  HBasicBlock* block_to = liveness_.GetBlockFromPosition(to);
+  DCHECK(block_from != nullptr);
+  DCHECK(block_to != nullptr);
+
+  // Both locations are in the same block. We split at the given location.
+  if (block_from == block_to) {
+    return Split(interval, to);
+  }
+
+  // If `to` is in a loop, find the outermost loop header which does not contain `from`.
+  for (HLoopInformationOutwardIterator it(*block_to); !it.Done(); it.Advance()) {
+    HBasicBlock* header = it.Current()->GetHeader();
+    if (block_from->GetLifetimeStart() >= header->GetLifetimeStart()) {
+      break;
+    }
+    block_to = header;
+  }
+
+  // Split at the start of the found block, to piggyback on existing moves
+  // due to resolution of non-linear control flow (see `ConnectSplitSiblings`).
+  return Split(interval, block_to->GetLifetimeStart());
+}
+
 LiveInterval* RegisterAllocator::Split(LiveInterval* interval, size_t position) {
   DCHECK_GE(position, interval->GetStart());
   DCHECK(!interval->IsDeadAt(position));
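
The loop in SplitBetween() walks outwards from `to`'s block and stops at the outermost loop header that does not also cover `from`, so the split lands at a block boundary outside the loop and the resulting move executes once instead of on every iteration. A standalone sketch of that position choice (hypothetical types; the header list plays the role of HLoopInformationOutwardIterator):

#include <cstddef>
#include <vector>

// Hypothetical CFG fragment: each block knows its lifetime start and the loop
// headers enclosing it, listed innermost first.
struct Block {
  size_t lifetime_start;
  std::vector<const Block*> enclosing_loop_headers;
};

size_t ChooseSplitPosition(const Block* from_block, const Block* to_block, size_t to) {
  if (from_block == to_block) {
    return to;  // Same block: split exactly at the requested position.
  }
  const Block* target = to_block;
  for (const Block* header : to_block->enclosing_loop_headers) {
    if (from_block->lifetime_start >= header->lifetime_start) {
      break;  // `from` does not start before this header; stop hoisting here.
    }
    target = header;
  }
  // Split at the start of the chosen block, piggybacking on the resolution
  // moves already emitted at block boundaries.
  return target->lifetime_start;
}
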
diff --git a/compiler/optimizing/register_allocator.h b/compiler/optimizing/register_allocator.h
index 717be75..dc9c708 100644
--- a/compiler/optimizing/register_allocator.h
+++ b/compiler/optimizing/register_allocator.h
@@ -86,8 +86,12 @@
   // Add `interval` in the given sorted list.
   static void AddSorted(GrowableArray<LiveInterval*>* array, LiveInterval* interval);
 
-  // Split `interval` at the position `at`. The new interval starts at `at`.
-  LiveInterval* Split(LiveInterval* interval, size_t at);
+  // Split `interval` at the position `position`. The new interval starts at `position`.
+  LiveInterval* Split(LiveInterval* interval, size_t position);
+
+  // Split `interval` at a position between `from` and `to`. The method will try
+  // to find an optimal split position.
+  LiveInterval* SplitBetween(LiveInterval* interval, size_t from, size_t to);
 
   // Returns whether `reg` is blocked by the code generator.
   bool IsBlocked(int reg) const;
diff --git a/compiler/optimizing/register_allocator_test.cc b/compiler/optimizing/register_allocator_test.cc
index 182cd0e..8c6d904 100644
--- a/compiler/optimizing/register_allocator_test.cc
+++ b/compiler/optimizing/register_allocator_test.cc
@@ -854,6 +854,10 @@
       X86InstructionSetFeatures::FromCppDefines());
   x86::CodeGeneratorX86 codegen(graph, *features_x86.get(), CompilerOptions());
   SsaLivenessAnalysis liveness(graph, &codegen);
+  // Populate the instructions in the liveness object, to please the register allocator.
+  for (size_t i = 0; i < 32; ++i) {
+    liveness.instructions_from_lifetime_position_.Add(user);
+  }
 
   RegisterAllocator register_allocator(&allocator, &codegen, liveness);
   register_allocator.unhandled_core_intervals_.Add(fourth);
diff --git a/compiler/optimizing/ssa_liveness_analysis.h b/compiler/optimizing/ssa_liveness_analysis.h
index fe70d3a..97254ed 100644
--- a/compiler/optimizing/ssa_liveness_analysis.h
+++ b/compiler/optimizing/ssa_liveness_analysis.h
@@ -998,6 +998,15 @@
     return instructions_from_lifetime_position_.Get(index);
   }
 
+  HBasicBlock* GetBlockFromPosition(size_t index) const {
+    HInstruction* instruction = GetInstructionFromPosition(index / 2);
+    if (instruction == nullptr) {
+      // If we are at a block boundary, get the following block.
+      instruction = GetInstructionFromPosition((index / 2) + 1);
+    }
+    return instruction->GetBlock();
+  }
+
   HInstruction* GetTempUser(LiveInterval* temp) const {
     // A temporary shares the same lifetime start as the instruction that requires it.
     DCHECK(temp->IsTemp());
@@ -1068,6 +1077,8 @@
   GrowableArray<HInstruction*> instructions_from_lifetime_position_;
   size_t number_of_ssa_values_;
 
+  ART_FRIEND_TEST(RegisterAllocatorTest, SpillInactive);
+
   DISALLOW_COPY_AND_ASSIGN(SsaLivenessAnalysis);
 };
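
GetBlockFromPosition() relies on how the analysis numbers lifetime positions: each instruction occupies two positions, so the table holds one entry per pair of positions, and the slot at a block boundary is null, which is why the lookup falls through to the first instruction of the following block. A compact sketch of that lookup under those assumptions (made-up names):

#include <cstddef>
#include <vector>

struct SketchBlock {};
struct SketchInstruction { SketchBlock* block; };

// `by_index` holds one entry per two lifetime positions; block-boundary slots
// are nullptr (assumed layout, standing in for instructions_from_lifetime_position_).
SketchBlock* BlockFromPosition(const std::vector<SketchInstruction*>& by_index,
                               size_t position) {
  SketchInstruction* instruction = by_index[position / 2];
  if (instruction == nullptr) {
    // Block boundary: the next slot holds the first instruction of the
    // following block in linear order.
    instruction = by_index[(position / 2) + 1];
  }
  return instruction->block;
}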
 
diff --git a/runtime/reflection.cc b/runtime/reflection.cc
index e546738..3099094 100644
--- a/runtime/reflection.cc
+++ b/runtime/reflection.cc
@@ -615,11 +615,21 @@
 
   // Wrap any exception with "Ljava/lang/reflect/InvocationTargetException;" and return early.
   if (soa.Self()->IsExceptionPending()) {
+    // If we get another exception when we are trying to wrap, then just use that instead.
     jthrowable th = soa.Env()->ExceptionOccurred();
-    soa.Env()->ExceptionClear();
+    soa.Self()->ClearException();
     jclass exception_class = soa.Env()->FindClass("java/lang/reflect/InvocationTargetException");
+    if (exception_class == nullptr) {
+      soa.Self()->AssertPendingOOMException();
+      return nullptr;
+    }
     jmethodID mid = soa.Env()->GetMethodID(exception_class, "<init>", "(Ljava/lang/Throwable;)V");
+    CHECK(mid != nullptr);
     jobject exception_instance = soa.Env()->NewObject(exception_class, mid, th);
+    if (exception_instance == nullptr) {
+      soa.Self()->AssertPendingOOMException();
+      return nullptr;
+    }
     soa.Env()->Throw(reinterpret_cast<jthrowable>(exception_instance));
     return nullptr;
   }
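
Each JNI call in this wrapping sequence can allocate and therefore throw (typically OutOfMemoryError), which is why every result is now checked before use. A standalone sketch of the same defensive pattern written against the plain JNI API (illustration only; the helper name is made up and this is not ART's InvokeMethod):

#include <jni.h>

// Wrap the pending throwable in InvocationTargetException. If any step fails,
// keep whatever exception that step raised (typically OOME) pending instead.
static void WrapPendingThrowable(JNIEnv* env) {
  jthrowable cause = env->ExceptionOccurred();
  if (cause == nullptr) {
    return;  // Nothing to wrap.
  }
  env->ExceptionClear();
  jclass ite_class = env->FindClass("java/lang/reflect/InvocationTargetException");
  if (ite_class == nullptr) {
    return;  // FindClass threw; leave that exception pending.
  }
  jmethodID ctor = env->GetMethodID(ite_class, "<init>", "(Ljava/lang/Throwable;)V");
  if (ctor == nullptr) {
    return;  // Constructor lookup threw; leave that exception pending.
  }
  jobject wrapped = env->NewObject(ite_class, ctor, cause);
  if (wrapped == nullptr) {
    return;  // Allocating the wrapper threw; leave that exception pending.
  }
  env->Throw(static_cast<jthrowable>(wrapped));
}
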
diff --git a/runtime/thread.cc b/runtime/thread.cc
index fa65bce..b27ad4a 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -1171,9 +1171,14 @@
 }
 
 void Thread::AssertPendingException() const {
-  if (UNLIKELY(!IsExceptionPending())) {
-    LOG(FATAL) << "Pending exception expected.";
-  }
+  CHECK(IsExceptionPending()) << "Pending exception expected.";
+}
+
+void Thread::AssertPendingOOMException() const {
+  AssertPendingException();
+  auto* e = GetException();
+  CHECK_EQ(e->GetClass(), DecodeJObject(WellKnownClasses::java_lang_OutOfMemoryError)->AsClass())
+      << e->Dump();
 }
 
 void Thread::AssertNoPendingException() const {
diff --git a/runtime/thread.h b/runtime/thread.h
index dd9e734..35b785d 100644
--- a/runtime/thread.h
+++ b/runtime/thread.h
@@ -336,6 +336,7 @@
   }
 
   void AssertPendingException() const;
+  void AssertPendingOOMException() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
   void AssertNoPendingException() const;
   void AssertNoPendingExceptionForNewException(const char* msg) const;
 
diff --git a/runtime/well_known_classes.cc b/runtime/well_known_classes.cc
index a803df8..a2d0427 100644
--- a/runtime/well_known_classes.cc
+++ b/runtime/well_known_classes.cc
@@ -39,6 +39,7 @@
 jclass WellKnownClasses::java_lang_Daemons;
 jclass WellKnownClasses::java_lang_Error;
 jclass WellKnownClasses::java_lang_Object;
+jclass WellKnownClasses::java_lang_OutOfMemoryError;
 jclass WellKnownClasses::java_lang_reflect_AbstractMethod;
 jclass WellKnownClasses::java_lang_reflect_ArtMethod;
 jclass WellKnownClasses::java_lang_reflect_Constructor;
@@ -176,6 +177,7 @@
   java_lang_ClassNotFoundException = CacheClass(env, "java/lang/ClassNotFoundException");
   java_lang_Daemons = CacheClass(env, "java/lang/Daemons");
   java_lang_Object = CacheClass(env, "java/lang/Object");
+  java_lang_OutOfMemoryError = CacheClass(env, "java/lang/OutOfMemoryError");
   java_lang_Error = CacheClass(env, "java/lang/Error");
   java_lang_reflect_AbstractMethod = CacheClass(env, "java/lang/reflect/AbstractMethod");
   java_lang_reflect_ArtMethod = CacheClass(env, "java/lang/reflect/ArtMethod");
diff --git a/runtime/well_known_classes.h b/runtime/well_known_classes.h
index 2df1c0e..cef9d55 100644
--- a/runtime/well_known_classes.h
+++ b/runtime/well_known_classes.h
@@ -50,6 +50,7 @@
   static jclass java_lang_Daemons;
   static jclass java_lang_Error;
   static jclass java_lang_Object;
+  static jclass java_lang_OutOfMemoryError;
   static jclass java_lang_reflect_AbstractMethod;
   static jclass java_lang_reflect_ArtMethod;
   static jclass java_lang_reflect_Constructor;
diff --git a/test/080-oom-throw/expected.txt b/test/080-oom-throw/expected.txt
index 73cc0d8..904393b 100644
--- a/test/080-oom-throw/expected.txt
+++ b/test/080-oom-throw/expected.txt
@@ -1,2 +1,3 @@
+Test reflection correctly threw
 NEW_ARRAY correctly threw OOME
 NEW_INSTANCE correctly threw OOME
diff --git a/test/080-oom-throw/src/Main.java b/test/080-oom-throw/src/Main.java
index c93f8bb..f007b25 100644
--- a/test/080-oom-throw/src/Main.java
+++ b/test/080-oom-throw/src/Main.java
@@ -14,6 +14,9 @@
  * limitations under the License.
  */
 
+import java.lang.reflect.InvocationTargetException;
+import java.lang.reflect.Method;
+
 public class Main {
     static class ArrayMemEater {
         static boolean sawOome;
@@ -68,6 +71,10 @@
     }
 
     public static void main(String[] args) {
+        if (triggerReflectionOOM()) {
+            System.out.println("Test reflection correctly threw");
+        }
+
         if (triggerArrayOOM()) {
             System.out.println("NEW_ARRAY correctly threw OOME");
         }
@@ -76,4 +83,46 @@
             System.out.println("NEW_INSTANCE correctly threw OOME");
         }
     }
+
+    static Object[] holder;
+
+    public static void blowup() throws Exception {
+        int size = 32 * 1024 * 1024;
+        for (int i = 0; i < holder.length; ) {
+            try {
+                holder[i] = new char[size];
+                i++;
+            } catch (OutOfMemoryError oome) {
+                size = size / 2;
+                if (size == 0) {
+                    break;
+                }
+            }
+        }
+        holder[0] = new char[100000];
+    }
+
+    static boolean triggerReflectionOOM() {
+        try {
+            Class<?> c = Main.class;
+            Method m = c.getMethod("blowup", (Class[]) null);
+            holder = new Object[1000000];
+            m.invoke(null);
+            holder = null;
+            System.out.println("Didn't throw from blowup");
+        } catch (OutOfMemoryError e) {
+            holder = null;
+        } catch (InvocationTargetException e) {
+            holder = null;
+            if (!(e.getCause() instanceof OutOfMemoryError)) {
+                System.out.println("InvocationTargetException cause not OOME " + e.getCause());
+                return false;
+            }
+        } catch (Exception e) {
+            holder = null;
+            System.out.println("Unexpected exception " + e);
+            return false;
+        }
+        return true;
+    }
 }
diff --git a/test/104-growth-limit/src/Main.java b/test/104-growth-limit/src/Main.java
index d666377..d31cbf1 100644
--- a/test/104-growth-limit/src/Main.java
+++ b/test/104-growth-limit/src/Main.java
@@ -29,26 +29,28 @@
         final Method get_runtime = vm_runtime.getDeclaredMethod("getRuntime");
         final Object runtime = get_runtime.invoke(null);
         final Method clear_growth_limit = vm_runtime.getDeclaredMethod("clearGrowthLimit");
+        List<byte[]> l = new ArrayList<byte[]>();
         try {
-            List<byte[]> l = new ArrayList<byte[]>();
             while (true) {
                 // Allocate a MB at a time
                 l.add(new byte[1048576]);
                 alloc1++;
             }
         } catch (OutOfMemoryError e) {
+            l = null;
         }
         // Expand the heap to the maximum size.
         clear_growth_limit.invoke(runtime);
         int alloc2 = 1;
+        l = new ArrayList<byte[]>();
         try {
-            List<byte[]> l = new ArrayList<byte[]>();
             while (true) {
                 // Allocate a MB at a time
                 l.add(new byte[1048576]);
                 alloc2++;
             }
         } catch (OutOfMemoryError e2) {
+            l = null;
             if (alloc1 > alloc2) {
                 System.out.println("ERROR: Allocated less memory after growth" +
                     "limit cleared (" + alloc1 + " MBs > " + alloc2 + " MBs");
diff --git a/test/474-checker-boolean-input/src/Main.java b/test/474-checker-boolean-input/src/Main.java
index 1ebe14e..9151986 100644
--- a/test/474-checker-boolean-input/src/Main.java
+++ b/test/474-checker-boolean-input/src/Main.java
@@ -23,35 +23,11 @@
   }
 
   /*
-   * Test that zero/one constants are accepted as Boolean inputs.
-   */
-
-  // CHECK-START: boolean Main.TestConstAsBoolean() inliner (before)
-  // CHECK-DAG:     [[Invoke:z\d+]]  InvokeStaticOrDirect
-  // CHECK-DAG:                      BooleanNot [ [[Invoke]] ]
-
-  // CHECK-START: boolean Main.TestConstAsBoolean() inliner (after)
-  // CHECK-DAG:     [[Const:i\d+]]   IntConstant 1
-  // CHECK-DAG:                      BooleanNot [ [[Const]] ]
-
-  public static boolean InlineConst() {
-    return true;
-  }
-
-  public static boolean TestConstAsBoolean() {
-    return InlineConst() != true ? true : false;
-  }
-
-  /*
    * Test that integer Phis are accepted as Boolean inputs until
    * we implement a suitable type analysis.
    */
 
-  // CHECK-START: boolean Main.TestPhiAsBoolean(int) inliner (before)
-  // CHECK-DAG:     [[Invoke:z\d+]]  InvokeStaticOrDirect
-  // CHECK-DAG:                      BooleanNot [ [[Invoke]] ]
-
-  // CHECK-START: boolean Main.TestPhiAsBoolean(int) inliner (after)
+  // CHECK-START: boolean Main.TestPhiAsBoolean(int) boolean_simplifier (after)
   // CHECK-DAG:     [[Phi:i\d+]]     Phi
   // CHECK-DAG:                      BooleanNot [ [[Phi]] ]
 
@@ -71,11 +47,7 @@
    * we implement a suitable type analysis.
    */
 
-  // CHECK-START: boolean Main.TestAndAsBoolean(boolean, boolean) inliner (before)
-  // CHECK-DAG:     [[Invoke:z\d+]]  InvokeStaticOrDirect
-  // CHECK-DAG:                      BooleanNot [ [[Invoke]] ]
-
-  // CHECK-START: boolean Main.TestAndAsBoolean(boolean, boolean) inliner (after)
+  // CHECK-START: boolean Main.TestAndAsBoolean(boolean, boolean) boolean_simplifier (after)
   // CHECK-DAG:     [[And:i\d+]]     And
   // CHECK-DAG:                      BooleanNot [ [[And]] ]
 
@@ -92,11 +64,7 @@
    * we implement a suitable type analysis.
    */
 
-  // CHECK-START: boolean Main.TestOrAsBoolean(boolean, boolean) inliner (before)
-  // CHECK-DAG:     [[Invoke:z\d+]]  InvokeStaticOrDirect
-  // CHECK-DAG:                      BooleanNot [ [[Invoke]] ]
-
-  // CHECK-START: boolean Main.TestOrAsBoolean(boolean, boolean) inliner (after)
+  // CHECK-START: boolean Main.TestOrAsBoolean(boolean, boolean) boolean_simplifier (after)
   // CHECK-DAG:     [[Or:i\d+]]      Or
   // CHECK-DAG:                      BooleanNot [ [[Or]] ]
 
@@ -113,11 +81,7 @@
    * we implement a suitable type analysis.
    */
 
-  // CHECK-START: boolean Main.TestXorAsBoolean(boolean, boolean) inliner (before)
-  // CHECK-DAG:     [[Invoke:z\d+]]  InvokeStaticOrDirect
-  // CHECK-DAG:                      BooleanNot [ [[Invoke]] ]
-
-  // CHECK-START: boolean Main.TestXorAsBoolean(boolean, boolean) inliner (after)
+  // CHECK-START: boolean Main.TestXorAsBoolean(boolean, boolean) boolean_simplifier (after)
   // CHECK-DAG:     [[Xor:i\d+]]     Xor
   // CHECK-DAG:                      BooleanNot [ [[Xor]] ]
 
@@ -132,7 +96,6 @@
   public static void main(String[] args) {
     f1 = true;
     f2 = false;
-    assertBoolEquals(false, TestConstAsBoolean());
     assertBoolEquals(true, TestPhiAsBoolean(0));
     assertBoolEquals(false, TestPhiAsBoolean(42));
     assertBoolEquals(true, TestAndAsBoolean(true, false));
diff --git a/test/etc/run-test-jar b/test/etc/run-test-jar
index 414e4df..8dd7573 100755
--- a/test/etc/run-test-jar
+++ b/test/etc/run-test-jar
@@ -225,7 +225,8 @@
 fi
 
 if [ "$USE_JVM" = "y" ]; then
-  ${JAVA} ${DEBUGGER_OPTS} ${JVM_VERIFY_ARG} -classpath classes $MAIN "$@"
+  # Xmx is necessary since we don't pass down the ART flags to the JVM.
+  ${JAVA} ${DEBUGGER_OPTS} ${JVM_VERIFY_ARG} -Xmx256m -classpath classes $MAIN "$@"
   exit
 fi