Merge "Optimizing: Reduce memory usage of HInstructions."
diff --git a/compiler/image_test.cc b/compiler/image_test.cc
index 992af29..5763cec 100644
--- a/compiler/image_test.cc
+++ b/compiler/image_test.cc
@@ -289,6 +289,11 @@
   TestWriteRead(ImageHeader::kStorageModeLZ4);
 }
 
+TEST_F(ImageTest, WriteReadLZ4HC) {
+  TestWriteRead(ImageHeader::kStorageModeLZ4HC);
+}
+
+
 TEST_F(ImageTest, ImageHeaderIsValid) {
     uint32_t image_begin = ART_BASE_ADDRESS;
     uint32_t image_size_ = 16 * KB;
diff --git a/compiler/image_writer.cc b/compiler/image_writer.cc
index 5eff8f3..871435b 100644
--- a/compiler/image_writer.cc
+++ b/compiler/image_writer.cc
@@ -18,6 +18,7 @@
 
 #include <sys/stat.h>
 #include <lz4.h>
+#include <lz4hc.h>
 
 #include <memory>
 #include <numeric>
@@ -224,18 +225,28 @@
     char* image_data = reinterpret_cast<char*>(image_info.image_->Begin()) + sizeof(ImageHeader);
     size_t data_size;
     const char* image_data_to_write;
+    const uint64_t compress_start_time = NanoTime();
 
     CHECK_EQ(image_header->storage_mode_, image_storage_mode_);
     switch (image_storage_mode_) {
       case ImageHeader::kStorageModeLZ4: {
-        size_t compressed_max_size = LZ4_compressBound(image_data_size);
+        const size_t compressed_max_size = LZ4_compressBound(image_data_size);
         compressed_data.reset(new char[compressed_max_size]);
         data_size = LZ4_compress(
             reinterpret_cast<char*>(image_info.image_->Begin()) + sizeof(ImageHeader),
             &compressed_data[0],
             image_data_size);
-        image_data_to_write = &compressed_data[0];
-        VLOG(compiler) << "Compressed from " << image_data_size << " to " << data_size;
+
+        break;
+      }
+      case ImageHeader::kStorageModeLZ4HC: {
+        // The compression bound is the same as for non-HC LZ4.
+        const size_t compressed_max_size = LZ4_compressBound(image_data_size);
+        compressed_data.reset(new char[compressed_max_size]);
+        data_size = LZ4_compressHC(
+            reinterpret_cast<char*>(image_info.image_->Begin()) + sizeof(ImageHeader),
+            &compressed_data[0],
+            image_data_size);
         break;
       }
       case ImageHeader::kStorageModeUncompressed: {
@@ -249,6 +260,12 @@
       }
     }
 
+    if (compressed_data != nullptr) {
+      image_data_to_write = &compressed_data[0];
+      VLOG(compiler) << "Compressed from " << image_data_size << " to " << data_size << " in "
+                     << PrettyDuration(NanoTime() - compress_start_time);
+    }
+
     // Write header first, as uncompressed.
     image_header->data_size_ = data_size;
     if (!image_file->WriteFully(image_info.image_->Begin(), sizeof(ImageHeader))) {
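As a point of reference, a minimal standalone sketch of the two compression paths the writer now chooses between, using only the public lz4 API already included above (LZ4_compressBound, LZ4_compress, LZ4_compressHC); the helper name and vector-based buffer are illustrative, not ART code. LZ4HC spends more CPU at compile time for a smaller image, and both modes decompress with the same LZ4 routine.

    #include <lz4.h>
    #include <lz4hc.h>
    #include <vector>

    // Compress `size` bytes from `src`; use the high-compression encoder when
    // `high_compression` is set (kStorageModeLZ4HC), else plain LZ4.
    std::vector<char> CompressImageData(const char* src, int size, bool high_compression) {
      std::vector<char> out(LZ4_compressBound(size));  // worst-case output size, same for both modes
      const int data_size = high_compression
          ? LZ4_compressHC(src, out.data(), size)
          : LZ4_compress(src, out.data(), size);
      out.resize(data_size);
      return out;
    }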
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index c27209f..985dc05 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -1862,6 +1862,36 @@
   HandleBinaryOp(instruction);
 }
 
+void LocationsBuilderARM64::VisitArm64BitwiseNegatedRight(HArm64BitwiseNegatedRight* instr) {
+  DCHECK(Primitive::IsIntegralType(instr->GetType())) << instr->GetType();
+  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instr);
+  locations->SetInAt(0, Location::RequiresRegister());
+  // There is no immediate variant of negated bitwise instructions in AArch64.
+  locations->SetInAt(1, Location::RequiresRegister());
+  locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+}
+
+void InstructionCodeGeneratorARM64::VisitArm64BitwiseNegatedRight(
+    HArm64BitwiseNegatedRight* instr) {
+  Register dst = OutputRegister(instr);
+  Register lhs = InputRegisterAt(instr, 0);
+  Register rhs = InputRegisterAt(instr, 1);
+
+  switch (instr->GetOpKind()) {
+    case HInstruction::kAnd:
+      __ Bic(dst, lhs, rhs);
+      break;
+    case HInstruction::kOr:
+      __ Orn(dst, lhs, rhs);
+      break;
+    case HInstruction::kXor:
+      __ Eon(dst, lhs, rhs);
+      break;
+    default:
+      LOG(FATAL) << "Unreachable";
+  }
+}
+
 void LocationsBuilderARM64::VisitArm64DataProcWithShifterOp(
     HArm64DataProcWithShifterOp* instruction) {
   DCHECK(instruction->GetType() == Primitive::kPrimInt ||
diff --git a/compiler/optimizing/graph_visualizer.cc b/compiler/optimizing/graph_visualizer.cc
index a3d6bcf..b9638f2 100644
--- a/compiler/optimizing/graph_visualizer.cc
+++ b/compiler/optimizing/graph_visualizer.cc
@@ -443,6 +443,10 @@
 #endif
 
 #ifdef ART_ENABLE_CODEGEN_arm64
+  void VisitArm64BitwiseNegatedRight(HArm64BitwiseNegatedRight* instruction) OVERRIDE {
+    StartAttributeStream("kind") << instruction->GetOpKind();
+  }
+
   void VisitArm64DataProcWithShifterOp(HArm64DataProcWithShifterOp* instruction) OVERRIDE {
     StartAttributeStream("kind") << instruction->GetInstrKind() << "+" << instruction->GetOpKind();
     if (HArm64DataProcWithShifterOp::IsShiftOp(instruction->GetOpKind())) {
diff --git a/compiler/optimizing/induction_var_analysis.cc b/compiler/optimizing/induction_var_analysis.cc
index a1e1cde..82a898a 100644
--- a/compiler/optimizing/induction_var_analysis.cc
+++ b/compiler/optimizing/induction_var_analysis.cc
@@ -552,9 +552,11 @@
     if (!IsExact(stride_expr, &stride_value)) {
       return;
     }
-    // Rewrite condition i != U into i < U or i > U if end condition is reached exactly.
-    if (cmp == kCondNE && ((stride_value == +1 && IsTaken(lower_expr, upper_expr, kCondLT)) ||
-                           (stride_value == -1 && IsTaken(lower_expr, upper_expr, kCondGT)))) {
+    // Rewrite condition i != U into strict end condition i < U or i > U if this end condition
+    // is reached exactly (tested by verifying if the loop has a unit stride and the non-strict
+    // condition would be always taken).
+    if (cmp == kCondNE && ((stride_value == +1 && IsTaken(lower_expr, upper_expr, kCondLE)) ||
+                           (stride_value == -1 && IsTaken(lower_expr, upper_expr, kCondGE)))) {
       cmp = stride_value > 0 ? kCondLT : kCondGT;
     }
     // Normalize a linear loop control with a nonzero stride:
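A minimal standalone illustration (hypothetical helpers, not ART code) of why the non-strict IsTaken(lower <= upper) test above is the right guard: with a unit stride and lower <= upper, the `i != upper` and `i < upper` loops take exactly the same number of iterations, including zero when the bounds are equal, so the rewrite preserves the trip count.

    #include <cassert>

    static int TripsNe(int lower, int upper) {
      int n = 0;
      for (int i = lower; i != upper; i++) n++;  // the original != loop
      return n;
    }

    static int TripsLt(int lower, int upper) {
      int n = 0;
      for (int i = lower; i < upper; i++) n++;   // the rewritten < loop
      return n;
    }

    int main() {
      assert(TripsNe(0, 10) == TripsLt(0, 10));
      assert(TripsNe(5, 5) == TripsLt(5, 5));    // lower == upper: zero trips in both forms
      return 0;
    }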
diff --git a/compiler/optimizing/induction_var_range.cc b/compiler/optimizing/induction_var_range.cc
index b162696..f9b6910 100644
--- a/compiler/optimizing/induction_var_range.cc
+++ b/compiler/optimizing/induction_var_range.cc
@@ -216,6 +216,14 @@
         }
       }
     } while (RefineOuter(&v_min, &v_max));
+    // Exploit the fact that array length + c >= c when c <= 0, to avoid arithmetic
+    // wrap-around anomalies (e.g. array length == maxint and c == 1 would yield minint).
+    if (request == kAtLeast) {
+      if (v_min.a_constant == 1 && v_min.b_constant <= 0 && v_min.instruction->IsArrayLength()) {
+        *value = v_min.b_constant;
+        return true;
+      }
+    }
   }
   return false;
 }
diff --git a/compiler/optimizing/instruction_simplifier_arm64.cc b/compiler/optimizing/instruction_simplifier_arm64.cc
index 83126a5..c2bbdcc 100644
--- a/compiler/optimizing/instruction_simplifier_arm64.cc
+++ b/compiler/optimizing/instruction_simplifier_arm64.cc
@@ -180,6 +180,53 @@
   return true;
 }
 
+bool InstructionSimplifierArm64Visitor::TryMergeNegatedInput(HBinaryOperation* op) {
+  DCHECK(op->IsAnd() || op->IsOr() || op->IsXor()) << op->DebugName();
+  HInstruction* left = op->GetLeft();
+  HInstruction* right = op->GetRight();
+
+  // Only consider the case where there is exactly one Not; with two Nots,
+  // De Morgan's laws should be applied instead.
+  if (left->IsNot() ^ right->IsNot()) {
+    HInstruction* hnot = (left->IsNot() ? left : right);
+    HInstruction* hother = (left->IsNot() ? right : left);
+
+    // Only do the simplification if the Not has only one use and can thus be
+    // safely removed. Even though ARM64 negated bitwise operations do not have
+    // an immediate variant (only register), we still do the simplification when
+    // `hother` is a constant, because it removes an instruction if the constant
+    // cannot be encoded as an immediate:
+    //   mov r0, #large_constant
+    //   neg r2, r1
+    //   and r0, r0, r2
+    // becomes:
+    //   mov r0, #large_constant
+    //   bic r0, r0, r1
+    if (hnot->HasOnlyOneNonEnvironmentUse()) {
+      // Replace code looking like
+      //    NOT tmp, mask
+      //    AND dst, src, tmp   (respectively ORR, EOR)
+      // with
+      //    BIC dst, src, mask  (respectively ORN, EON)
+      HInstruction* src = hnot->AsNot()->GetInput();
+
+      HArm64BitwiseNegatedRight* neg_op = new (GetGraph()->GetArena())
+          HArm64BitwiseNegatedRight(op->GetType(), op->GetKind(), hother, src, op->GetDexPc());
+
+      op->GetBlock()->ReplaceAndRemoveInstructionWith(op, neg_op);
+      hnot->GetBlock()->RemoveInstruction(hnot);
+      RecordSimplification();
+      return true;
+    }
+  }
+
+  return false;
+}
+
+void InstructionSimplifierArm64Visitor::VisitAnd(HAnd* instruction) {
+  TryMergeNegatedInput(instruction);
+}
+
 void InstructionSimplifierArm64Visitor::VisitArrayGet(HArrayGet* instruction) {
   TryExtractArrayAccessAddress(instruction,
                                instruction->GetArray(),
@@ -200,6 +247,10 @@
   }
 }
 
+void InstructionSimplifierArm64Visitor::VisitOr(HOr* instruction) {
+  TryMergeNegatedInput(instruction);
+}
+
 void InstructionSimplifierArm64Visitor::VisitShl(HShl* instruction) {
   if (instruction->InputAt(1)->IsConstant()) {
     TryMergeIntoUsersShifterOperand(instruction);
@@ -232,5 +283,9 @@
   }
 }
 
+void InstructionSimplifierArm64Visitor::VisitXor(HXor* instruction) {
+  TryMergeNegatedInput(instruction);
+}
+
 }  // namespace arm64
 }  // namespace art
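For readers less familiar with the AArch64 mnemonics, a small self-contained sketch (not ART code) of the value the new HArm64BitwiseNegatedRight node computes for each op kind, matching the BIC/ORN/EON instructions the ARM64 backend emits for it:

    #include <cassert>
    #include <cstdint>

    enum class NegKind { kAnd, kOr, kXor };

    uint32_t BitwiseNegatedRight(NegKind kind, uint32_t lhs, uint32_t rhs) {
      switch (kind) {
        case NegKind::kAnd: return lhs & ~rhs;  // BIC dst, lhs, rhs
        case NegKind::kOr:  return lhs | ~rhs;  // ORN dst, lhs, rhs
        case NegKind::kXor: return lhs ^ ~rhs;  // EON dst, lhs, rhs
      }
      return 0;  // unreachable
    }

    int main() {
      assert(BitwiseNegatedRight(NegKind::kAnd, 0xFFu, 0x0Fu) == 0xF0u);  // base & ~mask
      return 0;
    }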
diff --git a/compiler/optimizing/instruction_simplifier_arm64.h b/compiler/optimizing/instruction_simplifier_arm64.h
index 37a34c0..cf84587 100644
--- a/compiler/optimizing/instruction_simplifier_arm64.h
+++ b/compiler/optimizing/instruction_simplifier_arm64.h
@@ -51,14 +51,21 @@
     return TryMergeIntoShifterOperand(use, bitfield_op, true);
   }
 
+  // For bitwise operations (And/Or/Xor) with a negated input, try to use
+  // a negated bitwise instruction.
+  bool TryMergeNegatedInput(HBinaryOperation* op);
+
   // HInstruction visitors, sorted alphabetically.
+  void VisitAnd(HAnd* instruction) OVERRIDE;
   void VisitArrayGet(HArrayGet* instruction) OVERRIDE;
   void VisitArraySet(HArraySet* instruction) OVERRIDE;
   void VisitMul(HMul* instruction) OVERRIDE;
+  void VisitOr(HOr* instruction) OVERRIDE;
   void VisitShl(HShl* instruction) OVERRIDE;
   void VisitShr(HShr* instruction) OVERRIDE;
   void VisitTypeConversion(HTypeConversion* instruction) OVERRIDE;
   void VisitUShr(HUShr* instruction) OVERRIDE;
+  void VisitXor(HXor* instruction) OVERRIDE;
 
   OptimizingCompilerStats* stats_;
 };
diff --git a/compiler/optimizing/intrinsics_arm.cc b/compiler/optimizing/intrinsics_arm.cc
index 8cbdcbb..8e22f86 100644
--- a/compiler/optimizing/intrinsics_arm.cc
+++ b/compiler/optimizing/intrinsics_arm.cc
@@ -1909,6 +1909,69 @@
   __ revsh(out, in);
 }
 
+void IntrinsicLocationsBuilderARM::VisitStringGetCharsNoCheck(HInvoke* invoke) {
+  LocationSummary* locations = new (arena_) LocationSummary(invoke,
+                                                            LocationSummary::kNoCall,
+                                                            kIntrinsified);
+  locations->SetInAt(0, Location::RequiresRegister());
+  locations->SetInAt(1, Location::RequiresRegister());
+  locations->SetInAt(2, Location::RequiresRegister());
+  locations->SetInAt(3, Location::RequiresRegister());
+  locations->SetInAt(4, Location::RequiresRegister());
+
+  locations->AddTemp(Location::RequiresRegister());
+  locations->AddTemp(Location::RequiresRegister());
+  locations->AddTemp(Location::RequiresRegister());
+  locations->AddTemp(Location::RequiresRegister());
+}
+
+void IntrinsicCodeGeneratorARM::VisitStringGetCharsNoCheck(HInvoke* invoke) {
+  ArmAssembler* assembler = GetAssembler();
+  LocationSummary* locations = invoke->GetLocations();
+
+  // Check assumption that sizeof(Char) is 2 (used in scaling below).
+  const size_t char_size = Primitive::ComponentSize(Primitive::kPrimChar);
+  DCHECK_EQ(char_size, 2u);
+
+  // Location of data in char array buffer.
+  const uint32_t data_offset = mirror::Array::DataOffset(char_size).Uint32Value();
+
+  // Location of char array data in string.
+  const uint32_t value_offset = mirror::String::ValueOffset().Uint32Value();
+
+  // void getCharsNoCheck(int srcBegin, int srcEnd, char[] dst, int dstBegin);
+  // Since getChars() calls getCharsNoCheck(), we use registers rather than constants.
+  Register srcObj = locations->InAt(0).AsRegister<Register>();
+  Register srcBegin = locations->InAt(1).AsRegister<Register>();
+  Register srcEnd = locations->InAt(2).AsRegister<Register>();
+  Register dstObj = locations->InAt(3).AsRegister<Register>();
+  Register dstBegin = locations->InAt(4).AsRegister<Register>();
+
+  Register src_ptr = locations->GetTemp(0).AsRegister<Register>();
+  Register src_ptr_end = locations->GetTemp(1).AsRegister<Register>();
+  Register dst_ptr = locations->GetTemp(2).AsRegister<Register>();
+  Register tmp = locations->GetTemp(3).AsRegister<Register>();
+
+  // src range to copy.
+  __ add(src_ptr, srcObj, ShifterOperand(value_offset));
+  __ add(src_ptr_end, src_ptr, ShifterOperand(srcEnd, LSL, 1));
+  __ add(src_ptr, src_ptr, ShifterOperand(srcBegin, LSL, 1));
+
+  // dst to be copied.
+  __ add(dst_ptr, dstObj, ShifterOperand(data_offset));
+  __ add(dst_ptr, dst_ptr, ShifterOperand(dstBegin, LSL, 1));
+
+  // Do the copy.
+  Label loop, done;
+  __ Bind(&loop);
+  __ cmp(src_ptr, ShifterOperand(src_ptr_end));
+  __ b(&done, EQ);
+  __ ldrh(tmp, Address(src_ptr, char_size, Address::PostIndex));
+  __ strh(tmp, Address(dst_ptr, char_size, Address::PostIndex));
+  __ b(&loop);
+  __ Bind(&done);
+}
+
 // Unimplemented intrinsics.
 
 #define UNIMPLEMENTED_INTRINSIC(Name)                                                  \
@@ -1933,7 +1996,6 @@
 UNIMPLEMENTED_INTRINSIC(UnsafeCASLong)     // High register pressure.
 UNIMPLEMENTED_INTRINSIC(SystemArrayCopyChar)
 UNIMPLEMENTED_INTRINSIC(ReferenceGetReferent)
-UNIMPLEMENTED_INTRINSIC(StringGetCharsNoCheck)
 
 UNIMPLEMENTED_INTRINSIC(FloatIsInfinite)
 UNIMPLEMENTED_INTRINSIC(DoubleIsInfinite)
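The post-indexed ldrh/strh loop above is the intrinsified form of the copy below, shown as an illustrative standalone C++ equivalent (hypothetical function, not ART code); `src` already points at the string's char data and `dst` at the destination array's data, so the intrinsic only has to form the begin and end pointers and copy halfwords until they meet.

    #include <cstdint>

    void GetCharsNoCheck(const uint16_t* src, int src_begin, int src_end,
                         uint16_t* dst, int dst_begin) {
      const uint16_t* p = src + src_begin;       // src_ptr
      const uint16_t* end = src + src_end;       // src_ptr_end
      uint16_t* q = dst + dst_begin;             // dst_ptr
      while (p != end) {
        *q++ = *p++;                             // ldrh/strh with post-index
      }
    }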
diff --git a/compiler/optimizing/intrinsics_arm64.cc b/compiler/optimizing/intrinsics_arm64.cc
index b5f15fe..19ccb3d 100644
--- a/compiler/optimizing/intrinsics_arm64.cc
+++ b/compiler/optimizing/intrinsics_arm64.cc
@@ -1602,6 +1602,69 @@
   GenFPToFPCall(invoke, GetVIXLAssembler(), codegen_, kQuickNextAfter);
 }
 
+void IntrinsicLocationsBuilderARM64::VisitStringGetCharsNoCheck(HInvoke* invoke) {
+  LocationSummary* locations = new (arena_) LocationSummary(invoke,
+                                                            LocationSummary::kNoCall,
+                                                            kIntrinsified);
+  locations->SetInAt(0, Location::RequiresRegister());
+  locations->SetInAt(1, Location::RequiresRegister());
+  locations->SetInAt(2, Location::RequiresRegister());
+  locations->SetInAt(3, Location::RequiresRegister());
+  locations->SetInAt(4, Location::RequiresRegister());
+
+  locations->AddTemp(Location::RequiresRegister());
+  locations->AddTemp(Location::RequiresRegister());
+}
+
+void IntrinsicCodeGeneratorARM64::VisitStringGetCharsNoCheck(HInvoke* invoke) {
+  vixl::MacroAssembler* masm = GetVIXLAssembler();
+  LocationSummary* locations = invoke->GetLocations();
+
+  // Check assumption that sizeof(Char) is 2 (used in scaling below).
+  const size_t char_size = Primitive::ComponentSize(Primitive::kPrimChar);
+  DCHECK_EQ(char_size, 2u);
+
+  // Location of data in char array buffer.
+  const uint32_t data_offset = mirror::Array::DataOffset(char_size).Uint32Value();
+
+  // Location of char array data in string.
+  const uint32_t value_offset = mirror::String::ValueOffset().Uint32Value();
+
+  // void getCharsNoCheck(int srcBegin, int srcEnd, char[] dst, int dstBegin);
+  // Since getChars() calls getCharsNoCheck(), we use registers rather than constants.
+  Register srcObj = XRegisterFrom(locations->InAt(0));
+  Register srcBegin = XRegisterFrom(locations->InAt(1));
+  Register srcEnd = XRegisterFrom(locations->InAt(2));
+  Register dstObj = XRegisterFrom(locations->InAt(3));
+  Register dstBegin = XRegisterFrom(locations->InAt(4));
+
+  Register src_ptr = XRegisterFrom(locations->GetTemp(0));
+  Register src_ptr_end = XRegisterFrom(locations->GetTemp(1));
+
+  UseScratchRegisterScope temps(masm);
+  Register dst_ptr = temps.AcquireX();
+  Register tmp = temps.AcquireW();
+
+  // src range to copy.
+  __ Add(src_ptr, srcObj, Operand(value_offset));
+  __ Add(src_ptr_end, src_ptr, Operand(srcEnd, LSL, 1));
+  __ Add(src_ptr, src_ptr, Operand(srcBegin, LSL, 1));
+
+  // dst to be copied.
+  __ Add(dst_ptr, dstObj, Operand(data_offset));
+  __ Add(dst_ptr, dst_ptr, Operand(dstBegin, LSL, 1));
+
+  // Do the copy.
+  vixl::Label loop, done;
+  __ Bind(&loop);
+  __ Cmp(src_ptr, src_ptr_end);
+  __ B(&done, eq);
+  __ Ldrh(tmp, MemOperand(src_ptr, char_size, vixl::PostIndex));
+  __ Strh(tmp, MemOperand(dst_ptr, char_size, vixl::PostIndex));
+  __ B(&loop);
+  __ Bind(&done);
+}
+
 // Unimplemented intrinsics.
 
 #define UNIMPLEMENTED_INTRINSIC(Name)                                                  \
@@ -1615,7 +1678,6 @@
 UNIMPLEMENTED_INTRINSIC(SystemArrayCopyChar)
 UNIMPLEMENTED_INTRINSIC(SystemArrayCopy)
 UNIMPLEMENTED_INTRINSIC(ReferenceGetReferent)
-UNIMPLEMENTED_INTRINSIC(StringGetCharsNoCheck)
 
 UNIMPLEMENTED_INTRINSIC(FloatIsInfinite)
 UNIMPLEMENTED_INTRINSIC(DoubleIsInfinite)
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 174b29a..9eddfc7 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -1270,6 +1270,7 @@
 #define FOR_EACH_CONCRETE_INSTRUCTION_ARM64(M)
 #else
 #define FOR_EACH_CONCRETE_INSTRUCTION_ARM64(M)                          \
+  M(Arm64BitwiseNegatedRight, Instruction)                              \
   M(Arm64DataProcWithShifterOp, Instruction)                            \
   M(Arm64IntermediateAddress, Instruction)
 #endif
diff --git a/compiler/optimizing/nodes_arm64.h b/compiler/optimizing/nodes_arm64.h
index 173852a..75a71e7 100644
--- a/compiler/optimizing/nodes_arm64.h
+++ b/compiler/optimizing/nodes_arm64.h
@@ -118,6 +118,66 @@
   DISALLOW_COPY_AND_ASSIGN(HArm64IntermediateAddress);
 };
 
+class HArm64BitwiseNegatedRight : public HBinaryOperation {
+ public:
+  HArm64BitwiseNegatedRight(Primitive::Type result_type,
+                            InstructionKind op,
+                            HInstruction* left,
+                            HInstruction* right,
+                            uint32_t dex_pc = kNoDexPc)
+    : HBinaryOperation(result_type, left, right, SideEffects::None(), dex_pc),
+      op_kind_(op) {
+    DCHECK(op == HInstruction::kAnd || op == HInstruction::kOr || op == HInstruction::kXor) << op;
+  }
+
+  template <typename T, typename U>
+  auto Compute(T x, U y) const -> decltype(x & ~y) {
+    static_assert(std::is_same<decltype(x & ~y), decltype(x | ~y)>::value &&
+                  std::is_same<decltype(x & ~y), decltype(x ^ ~y)>::value,
+                  "Inconsistent negated bitwise types");
+    switch (op_kind_) {
+      case HInstruction::kAnd:
+        return x & ~y;
+      case HInstruction::kOr:
+        return x | ~y;
+      case HInstruction::kXor:
+        return x ^ ~y;
+      default:
+        LOG(FATAL) << "Unreachable";
+        UNREACHABLE();
+    }
+  }
+
+  HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE {
+    return GetBlock()->GetGraph()->GetIntConstant(
+        Compute(x->GetValue(), y->GetValue()), GetDexPc());
+  }
+  HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const OVERRIDE {
+    return GetBlock()->GetGraph()->GetLongConstant(
+        Compute(x->GetValue(), y->GetValue()), GetDexPc());
+  }
+  HConstant* Evaluate(HFloatConstant* x ATTRIBUTE_UNUSED,
+                      HFloatConstant* y ATTRIBUTE_UNUSED) const OVERRIDE {
+    LOG(FATAL) << DebugName() << " is not defined for float values";
+    UNREACHABLE();
+  }
+  HConstant* Evaluate(HDoubleConstant* x ATTRIBUTE_UNUSED,
+                      HDoubleConstant* y ATTRIBUTE_UNUSED) const OVERRIDE {
+    LOG(FATAL) << DebugName() << " is not defined for double values";
+    UNREACHABLE();
+  }
+
+  InstructionKind GetOpKind() const { return op_kind_; }
+
+  DECLARE_INSTRUCTION(Arm64BitwiseNegatedRight);
+
+ private:
+  // Specifies the bitwise operation, whose right input is negated.
+  const InstructionKind op_kind_;
+
+  DISALLOW_COPY_AND_ASSIGN(HArm64BitwiseNegatedRight);
+};
+
 }  // namespace art
 
 #endif  // ART_COMPILER_OPTIMIZING_NODES_ARM64_H_
diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc
index cac12d1..dfcb4bc 100644
--- a/dex2oat/dex2oat.cc
+++ b/dex2oat/dex2oat.cc
@@ -217,7 +217,7 @@
   UsageError("  --image=<file.art>: specifies an output image filename.");
   UsageError("      Example: --image=/system/framework/boot.art");
   UsageError("");
-  UsageError("  --image-format=(uncompressed|lz4):");
+  UsageError("  --image-format=(uncompressed|lz4|lz4hc):");
   UsageError("      Which format to store the image.");
   UsageError("      Example: --image-format=lz4");
   UsageError("      Default: uncompressed");
@@ -681,6 +681,8 @@
     const StringPiece format_str = option.substr(substr.length());
     if (format_str == "lz4") {
       image_storage_mode_ = ImageHeader::kStorageModeLZ4;
+    } else if (format_str == "lz4hc") {
+      image_storage_mode_ = ImageHeader::kStorageModeLZ4HC;
     } else if (format_str == "uncompressed") {
       image_storage_mode_ = ImageHeader::kStorageModeUncompressed;
     } else {
@@ -696,11 +698,6 @@
       Usage("Can't have both --image and (--app-image-fd or --app-image-file)");
     }
 
-    if (IsBootImage()) {
-      // We need the boot image to always be debuggable.
-      compiler_options_->debuggable_ = true;
-    }
-
     if (oat_filenames_.empty() && oat_fd_ == -1) {
       Usage("Output must be supplied with either --oat-file or --oat-fd");
     }
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index 9ea0827..cd4daeb 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -2608,18 +2608,6 @@
   return nullptr;
 }
 
-const void* ClassLinker::GetQuickOatCodeFor(const DexFile& dex_file,
-                                            uint16_t class_def_idx,
-                                            uint32_t method_idx) {
-  bool found;
-  OatFile::OatClass oat_class = FindOatClass(dex_file, class_def_idx, &found);
-  if (!found) {
-    return nullptr;
-  }
-  uint32_t oat_method_idx = GetOatMethodIndexFromMethodIndex(dex_file, class_def_idx, method_idx);
-  return oat_class.GetOatMethod(oat_method_idx).GetQuickCode();
-}
-
 bool ClassLinker::ShouldUseInterpreterEntrypoint(ArtMethod* method, const void* quick_code) {
   if (UNLIKELY(method->IsNative() || method->IsProxyMethod())) {
     return false;
@@ -2650,6 +2638,11 @@
     return true;
   }
 
+  if (Dbg::IsDebuggerActive()) {
+    // Boot image classes are AOT-compiled as non-debuggable.
+    return runtime->GetHeap()->IsInBootImageOatFile(quick_code);
+  }
+
   return false;
 }
 
diff --git a/runtime/class_linker.h b/runtime/class_linker.h
index a9448f7..aa55dac 100644
--- a/runtime/class_linker.h
+++ b/runtime/class_linker.h
@@ -472,12 +472,6 @@
   const void* GetQuickOatCodeFor(ArtMethod* method)
       SHARED_REQUIRES(Locks::mutator_lock_);
 
-  // Get the oat code for a method from a method index.
-  const void* GetQuickOatCodeFor(const DexFile& dex_file,
-                                 uint16_t class_def_idx,
-                                 uint32_t method_idx)
-      SHARED_REQUIRES(Locks::mutator_lock_);
-
   // Get compiled code for a method, return null if no code
   // exists. This is unlike Get..OatCodeFor which will return a bridge
   // or interpreter entrypoint.
diff --git a/runtime/debugger.cc b/runtime/debugger.cc
index 904490a..bc65893 100644
--- a/runtime/debugger.cc
+++ b/runtime/debugger.cc
@@ -28,6 +28,7 @@
 #include "class_linker-inl.h"
 #include "dex_file-inl.h"
 #include "dex_instruction.h"
+#include "entrypoints/runtime_asm_entrypoints.h"
 #include "gc/accounting/card_table-inl.h"
 #include "gc/allocation_record.h"
 #include "gc/scoped_gc_critical_section.h"
@@ -570,6 +571,29 @@
   return !Runtime::Current()->GetInstrumentation()->IsForcedInterpretOnly();
 }
 
+// Used to patch boot image method entry points to the interpreter bridge.
+class UpdateEntryPointsClassVisitor : public ClassVisitor {
+ public:
+  explicit UpdateEntryPointsClassVisitor(instrumentation::Instrumentation* instrumentation)
+      : instrumentation_(instrumentation) {}
+
+  bool operator()(mirror::Class* klass) OVERRIDE REQUIRES(Locks::mutator_lock_) {
+    auto pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
+    for (auto& m : klass->GetMethods(pointer_size)) {
+      const void* code = m.GetEntryPointFromQuickCompiledCode();
+      if (Runtime::Current()->GetHeap()->IsInBootImageOatFile(code) &&
+          !m.IsNative() &&
+          !m.IsProxyMethod()) {
+        instrumentation_->UpdateMethodsCode(&m, GetQuickToInterpreterBridge());
+      }
+    }
+    return true;
+  }
+
+ private:
+  instrumentation::Instrumentation* const instrumentation_;
+};
+
 void Dbg::GoActive() {
   // Enable all debugging features, including scans for breakpoints.
   // This is a no-op if we're already active.
@@ -598,6 +622,14 @@
   }
 
   Runtime* runtime = Runtime::Current();
+  // Since boot image code is AOT-compiled as non-debuggable, we need to patch the
+  // entry points of boot image methods to the interpreter bridge.
+  if (!runtime->GetInstrumentation()->IsForcedInterpretOnly()) {
+    ScopedObjectAccess soa(self);
+    UpdateEntryPointsClassVisitor visitor(runtime->GetInstrumentation());
+    runtime->GetClassLinker()->VisitClasses(&visitor);
+  }
+
   ScopedSuspendAll ssa(__FUNCTION__);
   if (RequiresDeoptimization()) {
     runtime->GetInstrumentation()->EnableDeoptimization();
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index a656fb8..4bee462 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -4058,6 +4058,15 @@
   return false;
 }
 
+bool Heap::IsInBootImageOatFile(const void* p) const {
+  for (gc::space::ImageSpace* space : boot_image_spaces_) {
+    if (space->GetOatFile()->Contains(p)) {
+      return true;
+    }
+  }
+  return false;
+}
+
 void Heap::GetBootImagesSize(uint32_t* boot_image_begin,
                              uint32_t* boot_image_end,
                              uint32_t* boot_oat_begin,
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index a181e23..6edb548 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -605,6 +605,9 @@
   bool ObjectIsInBootImageSpace(mirror::Object* obj) const
       SHARED_REQUIRES(Locks::mutator_lock_);
 
+  bool IsInBootImageOatFile(const void* p) const
+      SHARED_REQUIRES(Locks::mutator_lock_);
+
   void GetBootImagesSize(uint32_t* boot_image_begin,
                          uint32_t* boot_image_end,
                          uint32_t* boot_oat_begin,
diff --git a/runtime/gc/space/image_space.cc b/runtime/gc/space/image_space.cc
index 894ce9a..4ef36a4 100644
--- a/runtime/gc/space/image_space.cc
+++ b/runtime/gc/space/image_space.cc
@@ -1252,7 +1252,8 @@
     // Only care about the error message for the last address in addresses. We want to avoid the
     // overhead of printing the process maps if we can relocate.
     std::string* out_error_msg = (address == addresses.back()) ? &temp_error_msg : nullptr;
-    if (image_header->GetStorageMode() == ImageHeader::kStorageModeUncompressed) {
+    const ImageHeader::StorageMode storage_mode = image_header->GetStorageMode();
+    if (storage_mode == ImageHeader::kStorageModeUncompressed) {
       map.reset(MemMap::MapFileAtAddress(address,
                                          image_header->GetImageSize(),
                                          PROT_READ | PROT_WRITE,
@@ -1264,6 +1265,12 @@
                                          image_filename,
                                          /*out*/out_error_msg));
     } else {
+      if (storage_mode != ImageHeader::kStorageModeLZ4 &&
+          storage_mode != ImageHeader::kStorageModeLZ4HC) {
+        *error_msg = StringPrintf("Invalid storage mode in image header %d",
+                                  static_cast<int>(storage_mode));
+        return nullptr;
+      }
       // Reserve output and decompress into it.
       map.reset(MemMap::MapAnonymous(image_location,
                                      address,
@@ -1289,6 +1296,8 @@
         }
         memcpy(map->Begin(), image_header, sizeof(ImageHeader));
         const uint64_t start = NanoTime();
+        // LZ4HC and LZ4 share the same format, so both are decoded with LZ4_decompress_safe.
+        TimingLogger::ScopedTiming timing2("LZ4 decompress image", &logger);
         const size_t decompressed_size = LZ4_decompress_safe(
             reinterpret_cast<char*>(temp_map->Begin()) + sizeof(ImageHeader),
             reinterpret_cast<char*>(map->Begin()) + write_offset,
diff --git a/runtime/image.h b/runtime/image.h
index 146ee00..8e5dbad 100644
--- a/runtime/image.h
+++ b/runtime/image.h
@@ -81,6 +81,7 @@
   enum StorageMode : uint32_t {
     kStorageModeUncompressed,
     kStorageModeLZ4,
+    kStorageModeLZ4HC,
     kStorageModeCount,  // Number of elements in enum.
   };
   static constexpr StorageMode kDefaultStorageMode = kStorageModeUncompressed;
diff --git a/runtime/instrumentation.cc b/runtime/instrumentation.cc
index 7484635..b107b72 100644
--- a/runtime/instrumentation.cc
+++ b/runtime/instrumentation.cc
@@ -104,6 +104,14 @@
   method->SetEntryPointFromQuickCompiledCode(quick_code);
 }
 
+bool Instrumentation::NeedDebugVersionForBootImageCode(ArtMethod* method, const void* code) const
+    SHARED_REQUIRES(Locks::mutator_lock_) {
+  return Dbg::IsDebuggerActive() &&
+         Runtime::Current()->GetHeap()->IsInBootImageOatFile(code) &&
+         !method->IsNative() &&
+         !method->IsProxyMethod();
+}
+
 void Instrumentation::InstallStubsForMethod(ArtMethod* method) {
   if (!method->IsInvokable() || method->IsProxyMethod()) {
     // Do not change stubs for these methods.
@@ -124,6 +132,9 @@
       new_quick_code = GetQuickToInterpreterBridge();
     } else if (is_class_initialized || !method->IsStatic() || method->IsConstructor()) {
       new_quick_code = class_linker->GetQuickOatCodeFor(method);
+      if (NeedDebugVersionForBootImageCode(method, new_quick_code)) {
+        new_quick_code = GetQuickToInterpreterBridge();
+      }
     } else {
       new_quick_code = GetQuickResolutionStub();
     }
@@ -136,10 +147,13 @@
       // class, all its static methods code will be set to the instrumentation entry point.
       // For more details, see ClassLinker::FixupStaticTrampolines.
       if (is_class_initialized || !method->IsStatic() || method->IsConstructor()) {
-        if (entry_exit_stubs_installed_) {
+        new_quick_code = class_linker->GetQuickOatCodeFor(method);
+        if (NeedDebugVersionForBootImageCode(method, new_quick_code)) {
+          // The oat code should not be used. Don't install the instrumentation stub;
+          // use the interpreter for instrumentation instead.
+          new_quick_code = GetQuickToInterpreterBridge();
+        } else if (entry_exit_stubs_installed_) {
           new_quick_code = GetQuickInstrumentationEntryPoint();
-        } else {
-          new_quick_code = class_linker->GetQuickOatCodeFor(method);
         }
       } else {
         new_quick_code = GetQuickResolutionStub();
@@ -775,6 +789,9 @@
       UpdateEntrypoints(method, GetQuickResolutionStub());
     } else {
       const void* quick_code = class_linker->GetQuickOatCodeFor(method);
+      if (NeedDebugVersionForBootImageCode(method, quick_code)) {
+        quick_code = GetQuickToInterpreterBridge();
+      }
       UpdateEntrypoints(method, quick_code);
     }
 
diff --git a/runtime/instrumentation.h b/runtime/instrumentation.h
index e3cbf53..2e4be6b 100644
--- a/runtime/instrumentation.h
+++ b/runtime/instrumentation.h
@@ -247,6 +247,11 @@
     return forced_interpret_only_;
   }
 
+  // Returns true if the code is in the boot image oat file, which is not compiled as
+  // debuggable; a debug version (interpreter or JIT-compiled) is needed in that case.
+  bool NeedDebugVersionForBootImageCode(ArtMethod* method, const void* code) const
+      SHARED_REQUIRES(Locks::mutator_lock_);
+
   bool AreExitStubsInstalled() const {
     return instrumentation_stubs_installed_;
   }
diff --git a/runtime/interpreter/mterp/out/mterp_x86.S b/runtime/interpreter/mterp/out/mterp_x86.S
index b05360b..567550f 100644
--- a/runtime/interpreter/mterp/out/mterp_x86.S
+++ b/runtime/interpreter/mterp/out/mterp_x86.S
@@ -2989,8 +2989,13 @@
     call    SYMBOL(MterpInvokeVirtual)
     testb   %al, %al
     jz      MterpException
+    ADVANCE_PC 3
+    call    SYMBOL(MterpShouldSwitchInterpreters)
+    testb   %al, %al
+    jnz     MterpFallback
     RESTORE_IBASE
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
+    FETCH_INST
+    GOTO_NEXT
 
 /*
  * Handle a virtual method call.
@@ -3022,8 +3027,13 @@
     call    SYMBOL(MterpInvokeSuper)
     testb   %al, %al
     jz      MterpException
+    ADVANCE_PC 3
+    call    SYMBOL(MterpShouldSwitchInterpreters)
+    testb   %al, %al
+    jnz     MterpFallback
     RESTORE_IBASE
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
+    FETCH_INST
+    GOTO_NEXT
 
 /*
  * Handle a "super" method call.
@@ -3055,8 +3065,13 @@
     call    SYMBOL(MterpInvokeDirect)
     testb   %al, %al
     jz      MterpException
+    ADVANCE_PC 3
+    call    SYMBOL(MterpShouldSwitchInterpreters)
+    testb   %al, %al
+    jnz     MterpFallback
     RESTORE_IBASE
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
+    FETCH_INST
+    GOTO_NEXT
 
 
 /* ------------------------------ */
@@ -3081,8 +3096,13 @@
     call    SYMBOL(MterpInvokeStatic)
     testb   %al, %al
     jz      MterpException
+    ADVANCE_PC 3
+    call    SYMBOL(MterpShouldSwitchInterpreters)
+    testb   %al, %al
+    jnz     MterpFallback
     RESTORE_IBASE
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
+    FETCH_INST
+    GOTO_NEXT
 
 
 
@@ -3108,8 +3128,13 @@
     call    SYMBOL(MterpInvokeInterface)
     testb   %al, %al
     jz      MterpException
+    ADVANCE_PC 3
+    call    SYMBOL(MterpShouldSwitchInterpreters)
+    testb   %al, %al
+    jnz     MterpFallback
     RESTORE_IBASE
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
+    FETCH_INST
+    GOTO_NEXT
 
 /*
  * Handle an interface method call.
@@ -3155,8 +3180,13 @@
     call    SYMBOL(MterpInvokeVirtualRange)
     testb   %al, %al
     jz      MterpException
+    ADVANCE_PC 3
+    call    SYMBOL(MterpShouldSwitchInterpreters)
+    testb   %al, %al
+    jnz     MterpFallback
     RESTORE_IBASE
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
+    FETCH_INST
+    GOTO_NEXT
 
 
 /* ------------------------------ */
@@ -3181,8 +3211,13 @@
     call    SYMBOL(MterpInvokeSuperRange)
     testb   %al, %al
     jz      MterpException
+    ADVANCE_PC 3
+    call    SYMBOL(MterpShouldSwitchInterpreters)
+    testb   %al, %al
+    jnz     MterpFallback
     RESTORE_IBASE
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
+    FETCH_INST
+    GOTO_NEXT
 
 
 /* ------------------------------ */
@@ -3207,8 +3242,13 @@
     call    SYMBOL(MterpInvokeDirectRange)
     testb   %al, %al
     jz      MterpException
+    ADVANCE_PC 3
+    call    SYMBOL(MterpShouldSwitchInterpreters)
+    testb   %al, %al
+    jnz     MterpFallback
     RESTORE_IBASE
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
+    FETCH_INST
+    GOTO_NEXT
 
 
 /* ------------------------------ */
@@ -3233,8 +3273,13 @@
     call    SYMBOL(MterpInvokeStaticRange)
     testb   %al, %al
     jz      MterpException
+    ADVANCE_PC 3
+    call    SYMBOL(MterpShouldSwitchInterpreters)
+    testb   %al, %al
+    jnz     MterpFallback
     RESTORE_IBASE
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
+    FETCH_INST
+    GOTO_NEXT
 
 
 /* ------------------------------ */
@@ -3259,8 +3304,13 @@
     call    SYMBOL(MterpInvokeInterfaceRange)
     testb   %al, %al
     jz      MterpException
+    ADVANCE_PC 3
+    call    SYMBOL(MterpShouldSwitchInterpreters)
+    testb   %al, %al
+    jnz     MterpFallback
     RESTORE_IBASE
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
+    FETCH_INST
+    GOTO_NEXT
 
 
 /* ------------------------------ */
@@ -6002,8 +6052,13 @@
     call    SYMBOL(MterpInvokeVirtualQuick)
     testb   %al, %al
     jz      MterpException
+    ADVANCE_PC 3
+    call    SYMBOL(MterpShouldSwitchInterpreters)
+    testb   %al, %al
+    jnz     MterpFallback
     RESTORE_IBASE
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
+    FETCH_INST
+    GOTO_NEXT
 
 
 /* ------------------------------ */
@@ -6028,8 +6083,13 @@
     call    SYMBOL(MterpInvokeVirtualQuickRange)
     testb   %al, %al
     jz      MterpException
+    ADVANCE_PC 3
+    call    SYMBOL(MterpShouldSwitchInterpreters)
+    testb   %al, %al
+    jnz     MterpFallback
     RESTORE_IBASE
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
+    FETCH_INST
+    GOTO_NEXT
 
 
 /* ------------------------------ */
@@ -12851,13 +12911,17 @@
     call    SYMBOL(MterpHandleException)
     testb   %al, %al
     jz      MterpExceptionReturn
-    REFRESH_IBASE
     movl    OFF_FP_CODE_ITEM(rFP), %eax
     movl    OFF_FP_DEX_PC(rFP), %ecx
     lea     CODEITEM_INSNS_OFFSET(%eax), rPC
     lea     (rPC, %ecx, 2), rPC
     movl    rPC, OFF_FP_DEX_PC_PTR(rFP)
+    /* Do we need to switch interpreters? */
+    call    SYMBOL(MterpShouldSwitchInterpreters)
+    testb   %al, %al
+    jnz     MterpFallback
     /* resume execution at catch block */
+    REFRESH_IBASE
     FETCH_INST
     GOTO_NEXT
     /* NOTE: no fallthrough */
diff --git a/runtime/interpreter/mterp/x86/footer.S b/runtime/interpreter/mterp/x86/footer.S
index c67491e..64d72d7 100644
--- a/runtime/interpreter/mterp/x86/footer.S
+++ b/runtime/interpreter/mterp/x86/footer.S
@@ -115,13 +115,17 @@
     call    SYMBOL(MterpHandleException)
     testb   %al, %al
     jz      MterpExceptionReturn
-    REFRESH_IBASE
     movl    OFF_FP_CODE_ITEM(rFP), %eax
     movl    OFF_FP_DEX_PC(rFP), %ecx
     lea     CODEITEM_INSNS_OFFSET(%eax), rPC
     lea     (rPC, %ecx, 2), rPC
     movl    rPC, OFF_FP_DEX_PC_PTR(rFP)
+    /* Do we need to switch interpreters? */
+    call    SYMBOL(MterpShouldSwitchInterpreters)
+    testb   %al, %al
+    jnz     MterpFallback
     /* resume execution at catch block */
+    REFRESH_IBASE
     FETCH_INST
     GOTO_NEXT
     /* NOTE: no fallthrough */
diff --git a/runtime/interpreter/mterp/x86/invoke.S b/runtime/interpreter/mterp/x86/invoke.S
index bbd88cf..c23053b 100644
--- a/runtime/interpreter/mterp/x86/invoke.S
+++ b/runtime/interpreter/mterp/x86/invoke.S
@@ -16,5 +16,10 @@
     call    SYMBOL($helper)
     testb   %al, %al
     jz      MterpException
+    ADVANCE_PC 3
+    call    SYMBOL(MterpShouldSwitchInterpreters)
+    testb   %al, %al
+    jnz     MterpFallback
     RESTORE_IBASE
-    ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
+    FETCH_INST
+    GOTO_NEXT
diff --git a/runtime/jit/jit.cc b/runtime/jit/jit.cc
index 1a28733..3e66ce2 100644
--- a/runtime/jit/jit.cc
+++ b/runtime/jit/jit.cc
@@ -63,7 +63,8 @@
      << "JIT data cache size=" << PrettySize(code_cache_->DataCacheSize()) << "\n"
      << "JIT current capacity=" << PrettySize(code_cache_->GetCurrentCapacity()) << "\n"
      << "JIT number of compiled code=" << code_cache_->NumberOfCompiledCode() << "\n"
-     << "JIT total number of compilations=" << code_cache_->NumberOfCompilations() << "\n";
+     << "JIT total number of compilations=" << code_cache_->NumberOfCompilations() << "\n"
+     << "JIT total number of osr compilations=" << code_cache_->NumberOfOsrCompilations() << "\n";
   cumulative_timings_.Dump(os);
 }
 
diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc
index 478b164..8858b48 100644
--- a/runtime/jit/jit_code_cache.cc
+++ b/runtime/jit/jit_code_cache.cc
@@ -128,7 +128,8 @@
       garbage_collect_code_(garbage_collect_code),
       used_memory_for_data_(0),
       used_memory_for_code_(0),
-      number_of_compilations_(0) {
+      number_of_compilations_(0),
+      number_of_osr_compilations_(0) {
 
   DCHECK_GE(max_capacity, initial_code_capacity + initial_data_capacity);
   code_mspace_ = create_mspace_with_base(code_map_->Begin(), code_end_, false /*locked*/);
@@ -338,6 +339,7 @@
     MutexLock mu(self, lock_);
     method_code_map_.Put(code_ptr, method);
     if (osr) {
+      number_of_osr_compilations_++;
       osr_code_map_.Put(method, code_ptr);
     } else {
       Runtime::Current()->GetInstrumentation()->UpdateMethodsCode(
@@ -366,6 +368,11 @@
   return number_of_compilations_;
 }
 
+size_t JitCodeCache::NumberOfOsrCompilations() {
+  MutexLock mu(Thread::Current(), lock_);
+  return number_of_osr_compilations_;
+}
+
 size_t JitCodeCache::CodeCacheSize() {
   MutexLock mu(Thread::Current(), lock_);
   return CodeCacheSizeLocked();
diff --git a/runtime/jit/jit_code_cache.h b/runtime/jit/jit_code_cache.h
index e5b8e6c..4574edf 100644
--- a/runtime/jit/jit_code_cache.h
+++ b/runtime/jit/jit_code_cache.h
@@ -73,6 +73,7 @@
 
   // Number of compilations done throughout the lifetime of the JIT.
   size_t NumberOfCompilations() REQUIRES(!lock_);
+  size_t NumberOfOsrCompilations() REQUIRES(!lock_);
 
   bool NotifyCompilationOf(ArtMethod* method, Thread* self, bool osr)
       SHARED_REQUIRES(Locks::mutator_lock_)
@@ -304,6 +305,7 @@
 
   // Number of compilations done throughout the lifetime of the JIT.
   size_t number_of_compilations_ GUARDED_BY(lock_);
+  size_t number_of_osr_compilations_ GUARDED_BY(lock_);
 
   DISALLOW_IMPLICIT_CONSTRUCTORS(JitCodeCache);
 };
diff --git a/runtime/oat_file.h b/runtime/oat_file.h
index 910163c..fb91a8c 100644
--- a/runtime/oat_file.h
+++ b/runtime/oat_file.h
@@ -228,6 +228,10 @@
     return End() - Begin();
   }
 
+  bool Contains(const void* p) const {
+    return p >= Begin() && p < End();
+  }
+
   size_t BssSize() const {
     return BssEnd() - BssBegin();
   }
diff --git a/runtime/oat_file_manager.cc b/runtime/oat_file_manager.cc
index 18cf81a..ea26d58 100644
--- a/runtime/oat_file_manager.cc
+++ b/runtime/oat_file_manager.cc
@@ -16,6 +16,8 @@
 
 #include "oat_file_manager.h"
 
+#define ATRACE_TAG ATRACE_TAG_DALVIK
+#include <cutils/trace.h>
 #include <memory>
 #include <queue>
 #include <vector>
@@ -386,13 +388,15 @@
             ScopedSuspendAll ssa("Add image space");
             runtime->GetHeap()->AddSpace(image_space.get());
           }
-          added_image_space = true;
-          if (runtime->GetClassLinker()->AddImageSpace(image_space.get(),
-                                                       h_loader,
-                                                       dex_elements,
-                                                       dex_location,
-                                                       /*out*/&dex_files,
-                                                       /*out*/&temp_error_msg)) {
+          ATRACE_BEGIN(StringPrintf("Adding image space for location %s", dex_location).c_str());
+          added_image_space = runtime->GetClassLinker()->AddImageSpace(image_space.get(),
+                                                                       h_loader,
+                                                                       dex_elements,
+                                                                       dex_location,
+                                                                       /*out*/&dex_files,
+                                                                       /*out*/&temp_error_msg);
+          ATRACE_END();
+          if (added_image_space) {
             // Successfully added image space to heap, release the map so that it does not get
             // freed.
             image_space.release();
@@ -407,7 +411,6 @@
               ScopedSuspendAll ssa("Remove image space");
               runtime->GetHeap()->RemoveSpace(image_space.get());
             }
-            added_image_space = false;
             // Non-fatal, don't update error_msg.
           }
         }
diff --git a/test/020-string/expected.txt b/test/020-string/expected.txt
index 081fea3..76b8929 100644
--- a/test/020-string/expected.txt
+++ b/test/020-string/expected.txt
@@ -5,3 +5,9 @@
 Got expected exception
 subStr is 'uick brown fox jumps over the lazy '
 Indexes are: 0:-1:0:43:33:-1:18:13:13:-1:18:18:-1:13:-1:-1:-1
+Got expected exception
+Got expected exception
+Got expected exception
+Got expected exception
+Got expected exception
+llo And
diff --git a/test/020-string/src/Main.java b/test/020-string/src/Main.java
index b876e6a..7108082 100644
--- a/test/020-string/src/Main.java
+++ b/test/020-string/src/Main.java
@@ -25,6 +25,7 @@
         basicTest();
         indexTest();
         constructorTest();
+        copyTest();
     }
 
     public static void basicTest() {
@@ -117,4 +118,48 @@
         String s14 = new String(codePoints, 1, 3);
         String s15 = new String(stringBuilder);
     }
+
+    public static void copyTest() {
+        String src = new String("Hello Android");
+        char[] dst = new char[7];
+        char[] tmp = null;
+
+        try {
+            src.getChars(2, 9, tmp, 0);
+            System.out.println("GLITCH: expected exception");
+        } catch (NullPointerException npe) {
+            System.out.println("Got expected exception");
+        }
+
+        try {
+            src.getChars(-1, 9, dst, 0);
+            System.out.println("GLITCH: expected exception");
+        } catch (StringIndexOutOfBoundsException sioobe) {
+            System.out.println("Got expected exception");
+        }
+
+        try {
+            src.getChars(2, 19, dst, 0);
+            System.out.println("GLITCH: expected exception");
+        } catch (StringIndexOutOfBoundsException sioobe) {
+            System.out.println("Got expected exception");
+        }
+
+        try {
+            src.getChars(2, 1, dst, 0);
+            System.out.println("GLITCH: expected exception");
+        } catch (StringIndexOutOfBoundsException sioobe) {
+            System.out.println("Got expected exception");
+        }
+
+        try {
+            src.getChars(2, 10, dst, 0);
+            System.out.println("GLITCH: expected exception");
+        } catch (ArrayIndexOutOfBoundsException aioobe) {
+            System.out.println("Got expected exception");
+        }
+
+        src.getChars(2, 9, dst, 0);
+        System.out.println(new String(dst));
+    }
 }
diff --git a/test/082-inline-execute/src/Main.java b/test/082-inline-execute/src/Main.java
index 5b3fa14..93a9005 100644
--- a/test/082-inline-execute/src/Main.java
+++ b/test/082-inline-execute/src/Main.java
@@ -1039,6 +1039,7 @@
     Assert.assertEquals(StrictMath.round(-2.9d), -3l);
     Assert.assertEquals(StrictMath.round(-3.0d), -3l);
     Assert.assertEquals(StrictMath.round(0.49999999999999994d), 0l);
+    Assert.assertEquals(StrictMath.round(9007199254740991.0d), 9007199254740991l);  // 2^53 - 1
     Assert.assertEquals(StrictMath.round(Double.NaN), (long)+0.0d);
     Assert.assertEquals(StrictMath.round(Long.MAX_VALUE + 1.0d), Long.MAX_VALUE);
     Assert.assertEquals(StrictMath.round(Long.MIN_VALUE - 1.0d), Long.MIN_VALUE);
@@ -1062,6 +1063,7 @@
     Assert.assertEquals(StrictMath.round(-3.0f), -3);
     // 0.4999999701976776123046875
     Assert.assertEquals(StrictMath.round(Float.intBitsToFloat(0x3EFFFFFF)), (int)+0.0f);
+    Assert.assertEquals(StrictMath.round(16777215.0f), 16777215);  // 2^24 - 1
     Assert.assertEquals(StrictMath.round(Float.NaN), (int)+0.0f);
     Assert.assertEquals(StrictMath.round(Integer.MAX_VALUE + 1.0f), Integer.MAX_VALUE);
     Assert.assertEquals(StrictMath.round(Integer.MIN_VALUE - 1.0f), Integer.MIN_VALUE);
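The two new assertions above pin down a classic rounding pitfall: 2^53 - 1 (and 2^24 - 1 for float) is the largest odd integer exactly representable in the type, and adding 0.5 to it is not representable, so a naive floor(x + 0.5) implementation rounds up to the next power of two and is off by one. A minimal C++ illustration (not part of the test suite):

    #include <cassert>
    #include <cmath>
    #include <cstdint>

    int main() {
      const double x = 9007199254740991.0;       // 2^53 - 1, exactly representable
      const double naive = std::floor(x + 0.5);  // x + 0.5 ties to even and becomes 2^53
      assert(static_cast<int64_t>(naive) == 9007199254740992LL);  // off by one vs. a correct round()
      return 0;
    }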
diff --git a/test/449-checker-bce/src/Main.java b/test/449-checker-bce/src/Main.java
index 32bbc5b..66e1d92 100644
--- a/test/449-checker-bce/src/Main.java
+++ b/test/449-checker-bce/src/Main.java
@@ -137,20 +137,16 @@
   /// CHECK: ArraySet
   /// CHECK: BoundsCheck
   /// CHECK: ArraySet
-  /// CHECK: BoundsCheck
-  /// CHECK: ArraySet
 
   /// CHECK-START: void Main.$opt$noinline$constantIndexing2(int[]) BCE (after)
-  /// CHECK-NOT: Deoptimize
-  /// CHECK: BoundsCheck
+  /// CHECK: Deoptimize
+  /// CHECK-NOT: BoundsCheck
   /// CHECK: ArraySet
-  /// CHECK: BoundsCheck
+  /// CHECK-NOT: BoundsCheck
   /// CHECK: ArraySet
-  /// CHECK: BoundsCheck
+  /// CHECK-NOT: BoundsCheck
   /// CHECK: ArraySet
-  /// CHECK: BoundsCheck
-  /// CHECK: ArraySet
-  /// CHECK: BoundsCheck
+  /// CHECK-NOT: BoundsCheck
   /// CHECK: ArraySet
 
   static void $opt$noinline$constantIndexing2(int[] array) {
@@ -158,8 +154,7 @@
     array[2] = 1;
     array[3] = 1;
     array[4] = 1;
-    array[-1] = 1;  // prevents the whole opt on [-1:4]
-    if (array[1] == 1) {
+    if (array[1] != 1) {
       throw new Error("");
     }
   }
@@ -173,8 +168,41 @@
   /// CHECK: ArraySet
   /// CHECK: BoundsCheck
   /// CHECK: ArraySet
+  /// CHECK: BoundsCheck
+  /// CHECK: ArraySet
 
   /// CHECK-START: void Main.constantIndexing2b(int[]) BCE (after)
+  /// CHECK-NOT: Deoptimize
+  /// CHECK: BoundsCheck
+  /// CHECK: ArraySet
+  /// CHECK: BoundsCheck
+  /// CHECK: ArraySet
+  /// CHECK: BoundsCheck
+  /// CHECK: ArraySet
+  /// CHECK: BoundsCheck
+  /// CHECK: ArraySet
+  /// CHECK: BoundsCheck
+  /// CHECK: ArraySet
+
+  static void constantIndexing2b(int[] array) {
+    array[0] = 6;
+    array[1] = 6;
+    array[2] = 6;
+    array[3] = 6;
+    array[-1] = 1;  // prevents the whole opt on [-1:4]
+  }
+
+  /// CHECK-START: void Main.constantIndexing2c(int[]) BCE (before)
+  /// CHECK: BoundsCheck
+  /// CHECK: ArraySet
+  /// CHECK: BoundsCheck
+  /// CHECK: ArraySet
+  /// CHECK: BoundsCheck
+  /// CHECK: ArraySet
+  /// CHECK: BoundsCheck
+  /// CHECK: ArraySet
+
+  /// CHECK-START: void Main.constantIndexing2c(int[]) BCE (after)
   /// CHECK: Deoptimize
   /// CHECK-NOT: BoundsCheck
   /// CHECK: ArraySet
@@ -185,7 +213,7 @@
   /// CHECK-NOT: BoundsCheck
   /// CHECK: ArraySet
 
-  static void constantIndexing2b(int[] array) {
+  static void constantIndexing2c(int[] array) {
     array[0] = 7;
     array[1] = 7;
     array[2] = 7;
@@ -440,31 +468,37 @@
       System.out.println("constant indices 1 failed!");
     }
 
-    caught = false;
-    try {
-      $opt$noinline$constantIndexing2(a6);
-    } catch (ArrayIndexOutOfBoundsException e) {
-      caught = true;
-    }
-    if (!caught || a6[0] != 0 || a6[1] != 1 || a6[2] != 1 ||
-                   a6[3] != 1 || a6[4] != 1 || a6[5] != 11) {
+    $opt$noinline$constantIndexing2(a6);
+    if (a6[0] != 0 || a6[1] != 1 || a6[2] != 1 ||
+        a6[3] != 1 || a6[4] != 1 || a6[5] != 11) {
       System.out.println("constant indices 2 failed!");
     }
 
     caught = false;
     try {
-      constantIndexing2b(a1);
+      constantIndexing2b(a6);
+    } catch (ArrayIndexOutOfBoundsException e) {
+      caught = true;
+    }
+    if (!caught || a6[0] != 6 || a6[1] != 6 || a6[2] != 6 ||
+                   a6[3] != 6 || a6[4] != 1 || a6[5] != 11) {
+      System.out.println("constant indices 2b failed!");
+    }
+
+    caught = false;
+    try {
+      constantIndexing2c(a1);
     } catch (ArrayIndexOutOfBoundsException e) {
       caught = true;
     }
     if (!caught || a1[0] != 7) {
-      System.out.println("constant indices 2b failed!");
+      System.out.println("constant indices 2c failed!");
     }
 
-    constantIndexing2b(a6);
+    constantIndexing2c(a6);
     if (a6[0] != 7 || a6[1] != 7 || a6[2] != 7 ||
         a6[3] != 7 || a6[4] != 1 || a6[5] != 11) {
-      System.out.println("constant indices 2b failed!");
+      System.out.println("constant indices 2c failed!");
     }
 
     int[] b4 = new int[4];
diff --git a/test/530-checker-loops/src/Main.java b/test/530-checker-loops/src/Main.java
index 8633745..d5111b0 100644
--- a/test/530-checker-loops/src/Main.java
+++ b/test/530-checker-loops/src/Main.java
@@ -394,6 +394,34 @@
     return result;
   }
 
+  /// CHECK-START: int Main.linearForNEArrayLengthUp(int[]) BCE (before)
+  /// CHECK-DAG: BoundsCheck
+  //
+  /// CHECK-START: int Main.linearForNEArrayLengthUp(int[]) BCE (after)
+  /// CHECK-NOT: BoundsCheck
+  /// CHECK-NOT: Deoptimize
+  private static int linearForNEArrayLengthUp(int[] x) {
+    int result = 0;
+    for (int i = 0; i != x.length; i++) {
+      result += x[i];
+    }
+    return result;
+  }
+
+  /// CHECK-START: int Main.linearForNEArrayLengthDown(int[]) BCE (before)
+  /// CHECK-DAG: BoundsCheck
+  //
+  /// CHECK-START: int Main.linearForNEArrayLengthDown(int[]) BCE (after)
+  /// CHECK-NOT: BoundsCheck
+  /// CHECK-NOT: Deoptimize
+  private static int linearForNEArrayLengthDown(int[] x) {
+    int result = 0;
+    for (int i = x.length - 1; i != -1; i--) {
+      result += x[i];
+    }
+    return result;
+  }
+
   /// CHECK-START: int Main.linearDoWhileUp() BCE (before)
   /// CHECK-DAG: BoundsCheck
   //
@@ -670,6 +698,8 @@
     // Special forms.
     expectEquals(55, linearForNEUp());
     expectEquals(55, linearForNEDown());
+    expectEquals(55, linearForNEArrayLengthUp(x));
+    expectEquals(55, linearForNEArrayLengthDown(x));
     expectEquals(55, linearDoWhileUp());
     expectEquals(55, linearDoWhileDown());
     expectEquals(55, linearShort());
diff --git a/test/564-checker-negbitwise/expected.txt b/test/564-checker-negbitwise/expected.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/test/564-checker-negbitwise/expected.txt
diff --git a/test/564-checker-negbitwise/info.txt b/test/564-checker-negbitwise/info.txt
new file mode 100644
index 0000000..28b9e9e
--- /dev/null
+++ b/test/564-checker-negbitwise/info.txt
@@ -0,0 +1 @@
+Test simplification of negated bitwise operations on ARM64.
diff --git a/test/564-checker-negbitwise/src/Main.java b/test/564-checker-negbitwise/src/Main.java
new file mode 100644
index 0000000..3de7be7
--- /dev/null
+++ b/test/564-checker-negbitwise/src/Main.java
@@ -0,0 +1,207 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+
+  // A dummy value to defeat inlining of these routines.
+  static boolean doThrow = false;
+
+  public static void assertIntEquals(int expected, int result) {
+    if (expected != result) {
+      throw new Error("Expected: " + expected + ", found: " + result);
+    }
+  }
+
+  public static void assertLongEquals(long expected, long result) {
+    if (expected != result) {
+      throw new Error("Expected: " + expected + ", found: " + result);
+    }
+  }
+
+  /**
+   * Test merging of `NOT+AND` into `BIC`.
+   */
+
+  /// CHECK-START-ARM64: int Main.$opt$noinline$notAnd(int, int) instruction_simplifier_arm64 (before)
+  /// CHECK:       <<Base:i\d+>>        ParameterValue
+  /// CHECK:       <<Mask:i\d+>>        ParameterValue
+  /// CHECK:       <<Not:i\d+>>         Not [<<Mask>>]
+  /// CHECK:       <<Op:i\d+>>          And [<<Base>>,<<Not>>]
+  /// CHECK:                            Return [<<Op>>]
+
+  /// CHECK-START-ARM64: int Main.$opt$noinline$notAnd(int, int) instruction_simplifier_arm64 (after)
+  /// CHECK:       <<Base:i\d+>>        ParameterValue
+  /// CHECK:       <<Mask:i\d+>>        ParameterValue
+  /// CHECK:       <<NegOp:i\d+>>       Arm64BitwiseNegatedRight [<<Base>>,<<Mask>>] kind:And
+  /// CHECK:                            Return [<<NegOp>>]
+
+  /// CHECK-START-ARM64: int Main.$opt$noinline$notAnd(int, int) instruction_simplifier_arm64 (after)
+  /// CHECK-NOT:                        Not
+  /// CHECK-NOT:                        And
+
+  /// CHECK-START-ARM64: int Main.$opt$noinline$notAnd(int, int) disassembly (after)
+  /// CHECK:                            bic w{{\d+}}, w{{\d+}}, w{{\d+}}
+
+  public static int $opt$noinline$notAnd(int base, int mask) {
+    if (doThrow) throw new Error();
+    return base & ~mask;
+  }
+
+  /**
+   * Test merging of `NOT+ORR` into `ORN`.
+   */
+
+  /// CHECK-START-ARM64: long Main.$opt$noinline$notOr(long, long) instruction_simplifier_arm64 (before)
+  /// CHECK:       <<Base:j\d+>>        ParameterValue
+  /// CHECK:       <<Mask:j\d+>>        ParameterValue
+  /// CHECK:       <<Not:j\d+>>         Not [<<Mask>>]
+  /// CHECK:       <<Op:j\d+>>          Or [<<Base>>,<<Not>>]
+  /// CHECK:                            Return [<<Op>>]
+
+  /// CHECK-START-ARM64: long Main.$opt$noinline$notOr(long, long) instruction_simplifier_arm64 (after)
+  /// CHECK:       <<Base:j\d+>>        ParameterValue
+  /// CHECK:       <<Mask:j\d+>>        ParameterValue
+  /// CHECK:       <<NegOp:j\d+>>       Arm64BitwiseNegatedRight [<<Base>>,<<Mask>>] kind:Or
+  /// CHECK:                            Return [<<NegOp>>]
+
+  /// CHECK-START-ARM64: long Main.$opt$noinline$notOr(long, long) instruction_simplifier_arm64 (after)
+  /// CHECK-NOT:                        Not
+  /// CHECK-NOT:                        Or
+
+  /// CHECK-START-ARM64: long Main.$opt$noinline$notOr(long, long) disassembly (after)
+  /// CHECK:                            orn x{{\d+}}, x{{\d+}}, x{{\d+}}
+
+  public static long $opt$noinline$notOr(long base, long mask) {
+    if (doThrow) throw new Error();
+    return base | ~mask;
+  }
+
+  /**
+   * Test merging of `NOT+EOR` into `EON`.
+   */
+
+  /// CHECK-START-ARM64: int Main.$opt$noinline$notXor(int, int) instruction_simplifier_arm64 (before)
+  /// CHECK:       <<Base:i\d+>>        ParameterValue
+  /// CHECK:       <<Mask:i\d+>>        ParameterValue
+  /// CHECK:       <<Not:i\d+>>         Not [<<Mask>>]
+  /// CHECK:       <<Op:i\d+>>          Xor [<<Base>>,<<Not>>]
+  /// CHECK:                            Return [<<Op>>]
+
+  /// CHECK-START-ARM64: int Main.$opt$noinline$notXor(int, int) instruction_simplifier_arm64 (after)
+  /// CHECK:       <<Base:i\d+>>        ParameterValue
+  /// CHECK:       <<Mask:i\d+>>        ParameterValue
+  /// CHECK:       <<NegOp:i\d+>>       Arm64BitwiseNegatedRight [<<Base>>,<<Mask>>] kind:Xor
+  /// CHECK:                            Return [<<NegOp>>]
+
+  /// CHECK-START-ARM64: int Main.$opt$noinline$notXor(int, int) instruction_simplifier_arm64 (after)
+  /// CHECK-NOT:                        Not
+  /// CHECK-NOT:                        Xor
+
+  /// CHECK-START-ARM64: int Main.$opt$noinline$notXor(int, int) disassembly (after)
+  /// CHECK:                            eon w{{\d+}}, w{{\d+}}, w{{\d+}}
+
+  public static int $opt$noinline$notXor(int base, int mask) {
+    if (doThrow) throw new Error();
+    return base ^ ~mask;
+  }
+
+  /**
+   * Check that the transformation is also done when the base is a constant.
+   */
+
+  /// CHECK-START-ARM64: int Main.$opt$noinline$notXorConstant(int) instruction_simplifier_arm64 (before)
+  /// CHECK:       <<Mask:i\d+>>        ParameterValue
+  /// CHECK:       <<Constant:i\d+>>    IntConstant
+  /// CHECK:       <<Not:i\d+>>         Not [<<Mask>>]
+  /// CHECK:       <<Op:i\d+>>          Xor [<<Not>>,<<Constant>>]
+  /// CHECK:                            Return [<<Op>>]
+
+  /// CHECK-START-ARM64: int Main.$opt$noinline$notXorConstant(int) instruction_simplifier_arm64 (after)
+  /// CHECK:       <<Mask:i\d+>>        ParameterValue
+  /// CHECK:       <<Constant:i\d+>>    IntConstant
+  /// CHECK:       <<NegOp:i\d+>>       Arm64BitwiseNegatedRight [<<Constant>>,<<Mask>>] kind:Xor
+  /// CHECK:                            Return [<<NegOp>>]
+
+  /// CHECK-START-ARM64: int Main.$opt$noinline$notXorConstant(int) instruction_simplifier_arm64 (after)
+  /// CHECK-NOT:                        Not
+  /// CHECK-NOT:                        Xor
+
+  /// CHECK-START-ARM64: int Main.$opt$noinline$notXorConstant(int) disassembly (after)
+  /// CHECK:                            mov <<Reg:w\d+>>, #0xf
+  /// CHECK:                            eon w{{\d+}}, <<Reg>>, w{{\d+}}
+
+  public static int $opt$noinline$notXorConstant(int mask) {
+    if (doThrow) throw new Error();
+    return 0xf ^ ~mask;
+  }
+
+  /**
+   * Check that no transformation is done when Not has multiple uses.
+   */
+
+  /// CHECK-START-ARM64: int Main.$opt$noinline$notAndMultipleUses(int, int) instruction_simplifier_arm64 (before)
+  /// CHECK:       <<Base:i\d+>>        ParameterValue
+  /// CHECK:       <<Mask:i\d+>>        ParameterValue
+  /// CHECK:       <<One:i\d+>>         IntConstant
+  /// CHECK:       <<Not:i\d+>>         Not [<<Mask>>]
+  /// CHECK:       <<Op1:i\d+>>         And [<<Not>>,<<One>>]
+  /// CHECK:       <<Op2:i\d+>>         And [<<Base>>,<<Not>>]
+  /// CHECK:       <<Add:i\d+>>         Add [<<Op1>>,<<Op2>>]
+  /// CHECK:                            Return [<<Add>>]
+
+  /// CHECK-START-ARM64: int Main.$opt$noinline$notAndMultipleUses(int, int) instruction_simplifier_arm64 (after)
+  /// CHECK:       <<Base:i\d+>>        ParameterValue
+  /// CHECK:       <<Mask:i\d+>>        ParameterValue
+  /// CHECK:       <<One:i\d+>>         IntConstant
+  /// CHECK:       <<Not:i\d+>>         Not [<<Mask>>]
+  /// CHECK:       <<Op1:i\d+>>         And [<<Not>>,<<One>>]
+  /// CHECK:       <<Op2:i\d+>>         And [<<Base>>,<<Not>>]
+  /// CHECK:       <<Add:i\d+>>         Add [<<Op1>>,<<Op2>>]
+  /// CHECK:                            Return [<<Add>>]
+
+  /// CHECK-START-ARM64: int Main.$opt$noinline$notAndMultipleUses(int, int) instruction_simplifier_arm64 (after)
+  /// CHECK-NOT:                        Arm64BitwiseNegatedRight
+
+  public static int $opt$noinline$notAndMultipleUses(int base, int mask) {
+    if (doThrow) throw new Error();
+    int tmp = ~mask;
+    return (tmp & 0x1) + (base & tmp);
+  }
+
+  /**
+   * Check that no transformation is done when both inputs are `Not`s.
+   */
+
+  // We don't check the instructions before the pass: if De Morgan's laws have
+  // already been applied, Not/Not/Or (`~a | ~b`) is rewritten as And/Not (`~(a & b)`).
+
+  /// CHECK-START-ARM64: int Main.$opt$noinline$deMorganOr(int, int) instruction_simplifier_arm64 (after)
+  /// CHECK-NOT:                        Arm64BitwiseNegatedRight
+
+  public static int $opt$noinline$deMorganOr(int a, int b) {
+    if (doThrow) throw new Error();
+    return ~a | ~b;
+  }
+
+  public static void main(String[] args) {
+    assertIntEquals(0xe,   $opt$noinline$notAnd(0xf, 0x1));
+    assertLongEquals(~0x0, $opt$noinline$notOr(0xf, 0x1));
+    assertIntEquals(~0xe,  $opt$noinline$notXor(0xf, 0x1));
+    assertIntEquals(~0xe,  $opt$noinline$notXorConstant(0x1));
+    assertIntEquals(0xe,   $opt$noinline$notAndMultipleUses(0xf, 0x1));
+    assertIntEquals(~0x1,  $opt$noinline$deMorganOr(0x3, 0x1));
+  }
+}
diff --git a/test/Android.run-test.mk b/test/Android.run-test.mk
index 364be59..167ad85 100644
--- a/test/Android.run-test.mk
+++ b/test/Android.run-test.mk
@@ -220,6 +220,18 @@
         $(COMPILER_TYPES),$(RELOCATE_TYPES),$(TRACE_TYPES),$(GC_TYPES),$(JNI_TYPES), \
         $(IMAGE_TYPES), $(PICTEST_TYPES), $(DEBUGGABLE_TYPES), $(ART_TEST_RUN_TEST_SKIP), $(ALL_ADDRESS_SIZES))
 
+
+# Disable 097-duplicate-method while investigating (broken by latest Jack release, b/27358065)
+TEST_ART_BROKEN_ALL_TARGET_TESTS := \
+  097-duplicate-method
+
+ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(RUN_TYPES),$(PREBUILD_TYPES), \
+    $(COMPILER_TYPES), $(RELOCATE_TYPES),$(TRACE_TYPES),$(GC_TYPES),$(JNI_TYPES), \
+    $(IMAGE_TYPES),$(PICTEST_TYPES),$(DEBUGGABLE_TYPES), $(TEST_ART_BROKEN_ALL_TARGET_TESTS), \
+    $(ALL_ADDRESS_SIZES))
+
+TEST_ART_BROKEN_ALL_TARGET_TESTS :=
+
 # Tests that are timing sensitive and flaky on heavily loaded systems.
 TEST_ART_TIMING_SENSITIVE_RUN_TESTS := \
   002-sleep \