Merge "Fix MIPS64 boot"
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index 2897006..6a743eb 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -41,6 +41,7 @@
 #include "driver/dex_compilation_unit.h"
 #include "gc_map_builder.h"
 #include "graph_visualizer.h"
+#include "intrinsics.h"
 #include "leb128.h"
 #include "mapping_table.h"
 #include "mirror/array-inl.h"
@@ -1381,4 +1382,57 @@
   }
 }
 
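+// Shared location-summary builder for the per-architecture SystemArrayCopy
+// intrinsics. Either sets up a kCallOnSlowPath summary, or leaves the invoke
+// without locations, in which case the intrinsic is skipped and a plain
+// runtime call is generated instead, e.g. for a call already known to throw
+// (hypothetical Java caller):
+//   System.arraycopy(src, /* srcPos */ -1, dest, /* destPos */ 0, len);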
+void CodeGenerator::CreateSystemArrayCopyLocationSummary(HInvoke* invoke) {
+  // Check for known failures that will force us to bail out to the runtime;
+  // for those, just generate the runtime call directly.
+  HIntConstant* src_pos = invoke->InputAt(1)->AsIntConstant();
+  HIntConstant* dest_pos = invoke->InputAt(3)->AsIntConstant();
+
+  // The positions must be non-negative.
+  if ((src_pos != nullptr && src_pos->GetValue() < 0) ||
+      (dest_pos != nullptr && dest_pos->GetValue() < 0)) {
+    // We will have to fail anyway.
+    return;
+  }
+
+  // The length must be >= 0.
+  HIntConstant* length = invoke->InputAt(4)->AsIntConstant();
+  if (length != nullptr) {
+    int32_t len = length->GetValue();
+    if (len < 0) {
+      // Just call as normal.
+      return;
+    }
+  }
+
+  SystemArrayCopyOptimizations optimizations(invoke);
+
+  if (optimizations.GetDestinationIsSource()) {
+    if (src_pos != nullptr && dest_pos != nullptr && src_pos->GetValue() < dest_pos->GetValue()) {
+      // When source and destination are the same, we only support copying
+      // backward (src_pos >= dest_pos).
+      return;
+    }
+  }
+
+  if (optimizations.GetDestinationIsPrimitiveArray() || optimizations.GetSourceIsPrimitiveArray()) {
+    // We currently don't intrinsify primitive copying.
+    return;
+  }
+
+  ArenaAllocator* allocator = invoke->GetBlock()->GetGraph()->GetArena();
+  LocationSummary* locations = new (allocator) LocationSummary(invoke,
+                                                               LocationSummary::kCallOnSlowPath,
+                                                               kIntrinsified);
+  // arraycopy(Object src, int src_pos, Object dest, int dest_pos, int length).
+  locations->SetInAt(0, Location::RequiresRegister());
+  locations->SetInAt(1, Location::RegisterOrConstant(invoke->InputAt(1)));
+  locations->SetInAt(2, Location::RequiresRegister());
+  locations->SetInAt(3, Location::RegisterOrConstant(invoke->InputAt(3)));
+  locations->SetInAt(4, Location::RegisterOrConstant(invoke->InputAt(4)));
+
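+  // Three temporaries, used by the code generators as the source base address,
+  // the destination base address, and the source end address of the copy.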
+  locations->AddTemp(Location::RequiresRegister());
+  locations->AddTemp(Location::RequiresRegister());
+  locations->AddTemp(Location::RequiresRegister());
+}
+
 }  // namespace art
diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h
index acce5b3..b04dfc0 100644
--- a/compiler/optimizing/code_generator.h
+++ b/compiler/optimizing/code_generator.h
@@ -421,6 +421,8 @@
                                              Location runtime_type_index_location,
                                              Location runtime_return_location);
 
+  static void CreateSystemArrayCopyLocationSummary(HInvoke* invoke);
+
   void SetDisassemblyInformation(DisassemblyInformation* info) { disasm_info_ = info; }
   DisassemblyInformation* GetDisassemblyInformation() const { return disasm_info_; }
 
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index d172fba..bef576c 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -1645,6 +1645,7 @@
   DCHECK(codegen_->IsBaseline() || !invoke->IsStaticWithExplicitClinitCheck());
 
   IntrinsicLocationsBuilderARM intrinsic(GetGraph()->GetArena(),
+                                         codegen_->GetAssembler(),
                                          codegen_->GetInstructionSetFeatures());
   if (intrinsic.TryDispatch(invoke)) {
     return;
@@ -1684,6 +1685,7 @@
 
 void LocationsBuilderARM::VisitInvokeVirtual(HInvokeVirtual* invoke) {
   IntrinsicLocationsBuilderARM intrinsic(GetGraph()->GetArena(),
+                                         codegen_->GetAssembler(),
                                          codegen_->GetInstructionSetFeatures());
   if (intrinsic.TryDispatch(invoke)) {
     return;
diff --git a/compiler/optimizing/intrinsics_arm.cc b/compiler/optimizing/intrinsics_arm.cc
index 2793793..58e479a 100644
--- a/compiler/optimizing/intrinsics_arm.cc
+++ b/compiler/optimizing/intrinsics_arm.cc
@@ -1307,6 +1307,308 @@
   __ Bind(slow_path->GetExitLabel());
 }
 
+void IntrinsicLocationsBuilderARM::VisitSystemArrayCopy(HInvoke* invoke) {
+  CodeGenerator::CreateSystemArrayCopyLocationSummary(invoke);
+  LocationSummary* locations = invoke->GetLocations();
+  if (locations == nullptr) {
+    return;
+  }
+
+  HIntConstant* src_pos = invoke->InputAt(1)->AsIntConstant();
+  HIntConstant* dest_pos = invoke->InputAt(3)->AsIntConstant();
+  HIntConstant* length = invoke->InputAt(4)->AsIntConstant();
+
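+  // Not every 32-bit constant can be encoded as an ARM data-processing
+  // immediate; positions or lengths the assembler cannot always hold in a
+  // ShifterOperand are demoted back to registers.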
+  if (src_pos != nullptr && !assembler_->ShifterOperandCanAlwaysHold(src_pos->GetValue())) {
+    locations->SetInAt(1, Location::RequiresRegister());
+  }
+  if (dest_pos != nullptr && !assembler_->ShifterOperandCanAlwaysHold(dest_pos->GetValue())) {
+    locations->SetInAt(3, Location::RequiresRegister());
+  }
+  if (length != nullptr && !assembler_->ShifterOperandCanAlwaysHold(length->GetValue())) {
+    locations->SetInAt(4, Location::RequiresRegister());
+  }
+}
+
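+// Emit range checks for one side of the copy, branching to |slow_path| unless
+// 0 <= pos <= length(input) and (length(input) - pos) >= length. When
+// |length_is_input_length| is set, the copy length is known to equal the
+// length of |input|, so only pos == 0 can succeed.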
+static void CheckPosition(ArmAssembler* assembler,
+                          Location pos,
+                          Register input,
+                          Location length,
+                          SlowPathCode* slow_path,
+                          Register input_len,
+                          Register temp,
+                          bool length_is_input_length = false) {
+  // Where is the length in the Array?
+  const uint32_t length_offset = mirror::Array::LengthOffset().Uint32Value();
+
+  if (pos.IsConstant()) {
+    int32_t pos_const = pos.GetConstant()->AsIntConstant()->GetValue();
+    if (pos_const == 0) {
+      if (!length_is_input_length) {
+        // Check that length(input) >= length.
+        __ LoadFromOffset(kLoadWord, temp, input, length_offset);
+        if (length.IsConstant()) {
+          __ cmp(temp, ShifterOperand(length.GetConstant()->AsIntConstant()->GetValue()));
+        } else {
+          __ cmp(temp, ShifterOperand(length.AsRegister<Register>()));
+        }
+        __ b(slow_path->GetEntryLabel(), LT);
+      }
+    } else {
+      // Check that length(input) >= pos.
+      __ LoadFromOffset(kLoadWord, input_len, input, length_offset);
+      __ subs(temp, input_len, ShifterOperand(pos_const));
+      __ b(slow_path->GetEntryLabel(), LT);
+
+      // Check that (length(input) - pos) >= length.
+      if (length.IsConstant()) {
+        __ cmp(temp, ShifterOperand(length.GetConstant()->AsIntConstant()->GetValue()));
+      } else {
+        __ cmp(temp, ShifterOperand(length.AsRegister<Register>()));
+      }
+      __ b(slow_path->GetEntryLabel(), LT);
+    }
+  } else if (length_is_input_length) {
+    // The only way the copy can succeed is if pos is zero.
+    Register pos_reg = pos.AsRegister<Register>();
+    __ CompareAndBranchIfNonZero(pos_reg, slow_path->GetEntryLabel());
+  } else {
+    // Check that pos >= 0.
+    Register pos_reg = pos.AsRegister<Register>();
+    __ cmp(pos_reg, ShifterOperand(0));
+    __ b(slow_path->GetEntryLabel(), LT);
+
+    // Check that pos <= length(input).
+    __ LoadFromOffset(kLoadWord, temp, input, length_offset);
+    __ subs(temp, temp, ShifterOperand(pos_reg));
+    __ b(slow_path->GetEntryLabel(), LT);
+
+    // Check that (length(input) - pos) >= length.
+    if (length.IsConstant()) {
+      __ cmp(temp, ShifterOperand(length.GetConstant()->AsIntConstant()->GetValue()));
+    } else {
+      __ cmp(temp, ShifterOperand(length.AsRegister<Register>()));
+    }
+    __ b(slow_path->GetEntryLabel(), LT);
+  }
+}
+
+void IntrinsicCodeGeneratorARM::VisitSystemArrayCopy(HInvoke* invoke) {
+  ArmAssembler* assembler = GetAssembler();
+  LocationSummary* locations = invoke->GetLocations();
+
+  uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
+  uint32_t super_offset = mirror::Class::SuperClassOffset().Int32Value();
+  uint32_t component_offset = mirror::Class::ComponentTypeOffset().Int32Value();
+  uint32_t primitive_offset = mirror::Class::PrimitiveTypeOffset().Int32Value();
+
+  Register src = locations->InAt(0).AsRegister<Register>();
+  Location src_pos = locations->InAt(1);
+  Register dest = locations->InAt(2).AsRegister<Register>();
+  Location dest_pos = locations->InAt(3);
+  Location length = locations->InAt(4);
+  Register temp1 = locations->GetTemp(0).AsRegister<Register>();
+  Register temp2 = locations->GetTemp(1).AsRegister<Register>();
+  Register temp3 = locations->GetTemp(2).AsRegister<Register>();
+
+  SlowPathCode* slow_path = new (GetAllocator()) IntrinsicSlowPathARM(invoke);
+  codegen_->AddSlowPath(slow_path);
+
+  Label ok;
+  SystemArrayCopyOptimizations optimizations(invoke);
+
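+  // Overlap check. If source and destination may be the same array, only a
+  // copy that moves elements backward (src_pos >= dest_pos) is handled here;
+  // the forward-moving case falls back to the slow path.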
+  if (!optimizations.GetDestinationIsSource()) {
+    if (!src_pos.IsConstant() || !dest_pos.IsConstant()) {
+      __ cmp(src, ShifterOperand(dest));
+    }
+  }
+
+  // If source and destination are the same, go to the slow path when the copy
+  // would have to move elements forward (src_pos < dest_pos).
+  if (src_pos.IsConstant()) {
+    int32_t src_pos_constant = src_pos.GetConstant()->AsIntConstant()->GetValue();
+    if (dest_pos.IsConstant()) {
+      // Checked when building locations.
+      DCHECK(!optimizations.GetDestinationIsSource()
+             || (src_pos_constant >= dest_pos.GetConstant()->AsIntConstant()->GetValue()));
+    } else {
+      if (!optimizations.GetDestinationIsSource()) {
+        __ b(&ok, NE);
+      }
+      __ cmp(dest_pos.AsRegister<Register>(), ShifterOperand(src_pos_constant));
+      __ b(slow_path->GetEntryLabel(), GT);
+    }
+  } else {
+    if (!optimizations.GetDestinationIsSource()) {
+      __ b(&ok, NE);
+    }
+    if (dest_pos.IsConstant()) {
+      int32_t dest_pos_constant = dest_pos.GetConstant()->AsIntConstant()->GetValue();
+      __ cmp(src_pos.AsRegister<Register>(), ShifterOperand(dest_pos_constant));
+    } else {
+      __ cmp(src_pos.AsRegister<Register>(), ShifterOperand(dest_pos.AsRegister<Register>()));
+    }
+    __ b(slow_path->GetEntryLabel(), LT);
+  }
+
+  __ Bind(&ok);
+
+  if (!optimizations.GetSourceIsNotNull()) {
+    // Bail out if the source is null.
+    __ CompareAndBranchIfZero(src, slow_path->GetEntryLabel());
+  }
+
+  if (!optimizations.GetDestinationIsNotNull() && !optimizations.GetDestinationIsSource()) {
+    // Bail out if the destination is null.
+    __ CompareAndBranchIfZero(dest, slow_path->GetEntryLabel());
+  }
+
+  // If the length is negative, bail out.
+  // We have already checked in the LocationsBuilder for the constant case.
+  if (!length.IsConstant() &&
+      !optimizations.GetCountIsSourceLength() &&
+      !optimizations.GetCountIsDestinationLength()) {
+    __ cmp(length.AsRegister<Register>(), ShifterOperand(0));
+    __ b(slow_path->GetEntryLabel(), LT);
+  }
+
+  // Validity checks: source.
+  CheckPosition(assembler,
+                src_pos,
+                src,
+                length,
+                slow_path,
+                temp1,
+                temp2,
+                optimizations.GetCountIsSourceLength());
+
+  // Validity checks: dest.
+  CheckPosition(assembler,
+                dest_pos,
+                dest,
+                length,
+                slow_path,
+                temp1,
+                temp2,
+                optimizations.GetCountIsDestinationLength());
+
+  if (!optimizations.GetDoesNotNeedTypeCheck()) {
+    // Check whether all elements of the source array are assignable to the component
+    // type of the destination array. We do two checks: the classes are the same,
+    // or the destination is Object[]. If neither check succeeds, we go to the
+    // slow path.
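+    // Note: loaded class pointers may be poisoned (when heap reference
+    // poisoning is enabled), so they must be unpoisoned before being compared
+    // or dereferenced.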
+    __ LoadFromOffset(kLoadWord, temp1, dest, class_offset);
+    __ LoadFromOffset(kLoadWord, temp2, src, class_offset);
+    bool did_unpoison = false;
+    if (!optimizations.GetDestinationIsNonPrimitiveArray() ||
+        !optimizations.GetSourceIsNonPrimitiveArray()) {
+      // One or two of the references need to be unpoisoned. Unpoison them
+      // both to make the identity check valid.
+      __ MaybeUnpoisonHeapReference(temp1);
+      __ MaybeUnpoisonHeapReference(temp2);
+      did_unpoison = true;
+    }
+
+    if (!optimizations.GetDestinationIsNonPrimitiveArray()) {
+      // Bail out if the destination is not a non-primitive array.
+      __ LoadFromOffset(kLoadWord, temp3, temp1, component_offset);
+      __ CompareAndBranchIfZero(temp3, slow_path->GetEntryLabel());
+      __ MaybeUnpoisonHeapReference(temp3);
+      __ LoadFromOffset(kLoadUnsignedHalfword, temp3, temp3, primitive_offset);
+      static_assert(Primitive::kPrimNot == 0, "Expected 0 for kPrimNot");
+      __ CompareAndBranchIfNonZero(temp3, slow_path->GetEntryLabel());
+    }
+
+    if (!optimizations.GetSourceIsNonPrimitiveArray()) {
+      // Bail out if the source is not a non-primitive array.
+      __ LoadFromOffset(kLoadWord, temp3, temp2, component_offset);
+      __ CompareAndBranchIfZero(temp3, slow_path->GetEntryLabel());
+      __ MaybeUnpoisonHeapReference(temp3);
+      __ LoadFromOffset(kLoadUnsignedHalfword, temp3, temp3, primitive_offset);
+      static_assert(Primitive::kPrimNot == 0, "Expected 0 for kPrimNot");
+      __ CompareAndBranchIfNonZero(temp3, slow_path->GetEntryLabel());
+    }
+
+    __ cmp(temp1, ShifterOperand(temp2));
+
+    if (optimizations.GetDestinationIsTypedObjectArray()) {
+      Label do_copy;
+      __ b(&do_copy, EQ);
+      if (!did_unpoison) {
+        __ MaybeUnpoisonHeapReference(temp1);
+      }
+      __ LoadFromOffset(kLoadWord, temp1, temp1, component_offset);
+      __ MaybeUnpoisonHeapReference(temp1);
+      __ LoadFromOffset(kLoadWord, temp1, temp1, super_offset);
+      // No need to unpoison the result, as we're comparing against null.
+      __ CompareAndBranchIfNonZero(temp1, slow_path->GetEntryLabel());
+      __ Bind(&do_copy);
+    } else {
+      __ b(slow_path->GetEntryLabel(), NE);
+    }
+  } else if (!optimizations.GetSourceIsNonPrimitiveArray()) {
+    DCHECK(optimizations.GetDestinationIsNonPrimitiveArray());
+    // Bail out if the source is not a non-primitive array.
+    __ LoadFromOffset(kLoadWord, temp1, src, class_offset);
+    __ MaybeUnpoisonHeapReference(temp1);
+    __ LoadFromOffset(kLoadWord, temp3, temp1, component_offset);
+    __ CompareAndBranchIfZero(temp3, slow_path->GetEntryLabel());
+    __ MaybeUnpoisonHeapReference(temp3);
+    __ LoadFromOffset(kLoadUnsignedHalfword, temp3, temp3, primitive_offset);
+    static_assert(Primitive::kPrimNot == 0, "Expected 0 for kPrimNot");
+    __ CompareAndBranchIfNonZero(temp3, slow_path->GetEntryLabel());
+  }
+
+  // Compute base source address, base destination address, and end source address.
+
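+  // With 32-bit heap references, for example:
+  //   temp1 = src  + data_offset + src_pos  * 4  // source base
+  //   temp2 = dest + data_offset + dest_pos * 4  // destination base
+  //   temp3 = temp1 + length * 4                 // source end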
+  uint32_t element_size = sizeof(int32_t);
+  uint32_t offset = mirror::Array::DataOffset(element_size).Uint32Value();
+  if (src_pos.IsConstant()) {
+    int32_t constant = src_pos.GetConstant()->AsIntConstant()->GetValue();
+    __ AddConstant(temp1, src, element_size * constant + offset);
+  } else {
+    __ add(temp1, src, ShifterOperand(src_pos.AsRegister<Register>(), LSL, 2));
+    __ AddConstant(temp1, offset);
+  }
+
+  if (dest_pos.IsConstant()) {
+    int32_t constant = dest_pos.GetConstant()->AsIntConstant()->GetValue();
+    __ AddConstant(temp2, dest, element_size * constant + offset);
+  } else {
+    __ add(temp2, dest, ShifterOperand(dest_pos.AsRegister<Register>(), LSL, 2));
+    __ AddConstant(temp2, offset);
+  }
+
+  if (length.IsConstant()) {
+    int32_t constant = length.GetConstant()->AsIntConstant()->GetValue();
+    __ AddConstant(temp3, temp1, element_size * constant);
+  } else {
+    __ add(temp3, temp1, ShifterOperand(length.AsRegister<Register>(), LSL, 2));
+  }
+
+  // Iterate over the arrays and do a raw copy of the objects. We don't need
+  // to poison/unpoison, nor to emit read barriers: the next uses of the
+  // destination array will do that.
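+  // The loads and stores below use post-indexed addressing: each ldr/str also
+  // advances its base register by element_size, and the loop exits once temp1
+  // reaches the end address held in temp3.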
+  Label loop, done;
+  __ cmp(temp1, ShifterOperand(temp3));
+  __ b(&done, EQ);
+  __ Bind(&loop);
+  __ ldr(IP, Address(temp1, element_size, Address::PostIndex));
+  __ str(IP, Address(temp2, element_size, Address::PostIndex));
+  __ cmp(temp1, ShifterOperand(temp3));
+  __ b(&loop, NE);
+  __ Bind(&done);
+
+  // We only need one card marking on the destination array.
+  codegen_->MarkGCCard(temp1,
+                       temp2,
+                       dest,
+                       Register(kNoRegister),
+                       false);
+
+  __ Bind(slow_path->GetExitLabel());
+}
+
 // Unimplemented intrinsics.
 
 #define UNIMPLEMENTED_INTRINSIC(Name)                                                  \
@@ -1333,7 +1635,6 @@
 UNIMPLEMENTED_INTRINSIC(MathRoundFloat)    // Could be done by changing rounding mode, maybe?
 UNIMPLEMENTED_INTRINSIC(UnsafeCASLong)     // High register pressure.
 UNIMPLEMENTED_INTRINSIC(SystemArrayCopyChar)
-UNIMPLEMENTED_INTRINSIC(SystemArrayCopy)
 UNIMPLEMENTED_INTRINSIC(ReferenceGetReferent)
 UNIMPLEMENTED_INTRINSIC(StringGetCharsNoCheck)
 
diff --git a/compiler/optimizing/intrinsics_arm.h b/compiler/optimizing/intrinsics_arm.h
index 2abb605..127e9a4 100644
--- a/compiler/optimizing/intrinsics_arm.h
+++ b/compiler/optimizing/intrinsics_arm.h
@@ -33,8 +33,10 @@
 
 class IntrinsicLocationsBuilderARM FINAL : public IntrinsicVisitor {
  public:
-  IntrinsicLocationsBuilderARM(ArenaAllocator* arena, const ArmInstructionSetFeatures& features)
-      : arena_(arena), features_(features) {}
+  IntrinsicLocationsBuilderARM(ArenaAllocator* arena,
+                               ArmAssembler* assembler,
+                               const ArmInstructionSetFeatures& features)
+      : arena_(arena), assembler_(assembler), features_(features) {}
 
   // Define visitor methods.
 
@@ -52,6 +54,7 @@
 
  private:
   ArenaAllocator* arena_;
+  ArmAssembler* assembler_;
 
   const ArmInstructionSetFeatures& features_;
 
diff --git a/compiler/optimizing/intrinsics_x86_64.cc b/compiler/optimizing/intrinsics_x86_64.cc
index 1061aae..e0d88a9 100644
--- a/compiler/optimizing/intrinsics_x86_64.cc
+++ b/compiler/optimizing/intrinsics_x86_64.cc
@@ -914,55 +914,7 @@
 
 
 void IntrinsicLocationsBuilderX86_64::VisitSystemArrayCopy(HInvoke* invoke) {
-  // Check to see if we have known failures that will cause us to have to bail out
-  // to the runtime, and just generate the runtime call directly.
-  HIntConstant* src_pos = invoke->InputAt(1)->AsIntConstant();
-  HIntConstant* dest_pos = invoke->InputAt(3)->AsIntConstant();
-
-  // The positions must be non-negative.
-  if ((src_pos != nullptr && src_pos->GetValue() < 0) ||
-      (dest_pos != nullptr && dest_pos->GetValue() < 0)) {
-    // We will have to fail anyways.
-    return;
-  }
-
-  // The length must be > 0.
-  HIntConstant* length = invoke->InputAt(4)->AsIntConstant();
-  if (length != nullptr) {
-    int32_t len = length->GetValue();
-    if (len < 0) {
-      // Just call as normal.
-      return;
-    }
-  }
-
-  SystemArrayCopyOptimizations optimizations(invoke);
-
-  if (optimizations.GetDestinationIsSource()) {
-    if (src_pos != nullptr && dest_pos != nullptr && src_pos->GetValue() < dest_pos->GetValue()) {
-      // We only support backward copying if source and destination are the same.
-      return;
-    }
-  }
-
-  if (optimizations.GetDestinationIsPrimitiveArray() || optimizations.GetSourceIsPrimitiveArray()) {
-    // We currently don't intrinsify primitive copying.
-    return;
-  }
-
-  LocationSummary* locations = new (arena_) LocationSummary(invoke,
-                                                            LocationSummary::kCallOnSlowPath,
-                                                            kIntrinsified);
-  // arraycopy(Object src, int src_pos, Object dest, int dest_pos, int length).
-  locations->SetInAt(0, Location::RequiresRegister());
-  locations->SetInAt(1, Location::RegisterOrConstant(invoke->InputAt(1)));
-  locations->SetInAt(2, Location::RequiresRegister());
-  locations->SetInAt(3, Location::RegisterOrConstant(invoke->InputAt(3)));
-  locations->SetInAt(4, Location::RegisterOrConstant(invoke->InputAt(4)));
-
-  locations->AddTemp(Location::RequiresRegister());
-  locations->AddTemp(Location::RequiresRegister());
-  locations->AddTemp(Location::RequiresRegister());
+  CodeGenerator::CreateSystemArrayCopyLocationSummary(invoke);
 }
 
 void IntrinsicCodeGeneratorX86_64::VisitSystemArrayCopy(HInvoke* invoke) {
@@ -990,7 +942,9 @@
   SystemArrayCopyOptimizations optimizations(invoke);
 
   if (!optimizations.GetDestinationIsSource()) {
-    __ cmpl(src, dest);
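+    // If both positions are constants, the location builder already rejected
+    // the unsupported overlapping case, so the src/dest comparison can be
+    // elided.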
+    if (!src_pos.IsConstant() || !dest_pos.IsConstant()) {
+      __ cmpl(src, dest);
+    }
   }
 
   // If source and destination are the same, we go to slow path if we need to do
diff --git a/compiler/utils/arm/assembler_arm.h b/compiler/utils/arm/assembler_arm.h
index 967b191..69ecc8f 100644
--- a/compiler/utils/arm/assembler_arm.h
+++ b/compiler/utils/arm/assembler_arm.h
@@ -832,6 +832,8 @@
                                      uint32_t immediate,
                                      ShifterOperand* shifter_op) = 0;
 
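+  // Whether |immediate| can be encoded as a shifter operand by every
+  // data-processing instruction, regardless of opcode or registers (unlike
+  // ShifterOperandCanHold, which is opcode-specific).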
+  virtual bool ShifterOperandCanAlwaysHold(uint32_t immediate) = 0;
+
   static bool IsInstructionForExceptionHandling(uintptr_t pc);
 
   virtual void CompareAndBranchIfZero(Register r, Label* label) = 0;
diff --git a/compiler/utils/arm/assembler_arm32.cc b/compiler/utils/arm/assembler_arm32.cc
index f7772ae..9ae7bb6 100644
--- a/compiler/utils/arm/assembler_arm32.cc
+++ b/compiler/utils/arm/assembler_arm32.cc
@@ -48,6 +48,11 @@
   return false;
 }
 
+bool Arm32Assembler::ShifterOperandCanAlwaysHold(uint32_t immediate) {
+  ShifterOperand shifter_op;
+  return ShifterOperandCanHoldArm32(immediate, &shifter_op);
+}
+
 bool Arm32Assembler::ShifterOperandCanHold(Register rd ATTRIBUTE_UNUSED,
                                            Register rn ATTRIBUTE_UNUSED,
                                            Opcode opcode ATTRIBUTE_UNUSED,
diff --git a/compiler/utils/arm/assembler_arm32.h b/compiler/utils/arm/assembler_arm32.h
index 3407369..b40c093 100644
--- a/compiler/utils/arm/assembler_arm32.h
+++ b/compiler/utils/arm/assembler_arm32.h
@@ -294,6 +294,7 @@
                              uint32_t immediate,
                              ShifterOperand* shifter_op) OVERRIDE;
 
+  bool ShifterOperandCanAlwaysHold(uint32_t immediate) OVERRIDE;
 
   static bool IsInstructionForExceptionHandling(uintptr_t pc);
 
diff --git a/compiler/utils/arm/assembler_thumb2.cc b/compiler/utils/arm/assembler_thumb2.cc
index 0f6c4f5..2ec5586 100644
--- a/compiler/utils/arm/assembler_thumb2.cc
+++ b/compiler/utils/arm/assembler_thumb2.cc
@@ -390,6 +390,10 @@
   EmitLiterals();
 }
 
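+// On Thumb2 the modified-immediate encoding is shared by the data-processing
+// instructions, so an immediate with a valid modified-immediate form can
+// always be used as a shifter operand.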
+bool Thumb2Assembler::ShifterOperandCanAlwaysHold(uint32_t immediate) {
+  return ArmAssembler::ModifiedImmediate(immediate) != kInvalidModifiedImmediate;
+}
+
 bool Thumb2Assembler::ShifterOperandCanHold(Register rd ATTRIBUTE_UNUSED,
                                             Register rn ATTRIBUTE_UNUSED,
                                             Opcode opcode,
diff --git a/compiler/utils/arm/assembler_thumb2.h b/compiler/utils/arm/assembler_thumb2.h
index a1a8927..cfed0ca 100644
--- a/compiler/utils/arm/assembler_thumb2.h
+++ b/compiler/utils/arm/assembler_thumb2.h
@@ -337,6 +337,8 @@
                              uint32_t immediate,
                              ShifterOperand* shifter_op) OVERRIDE;
 
+  bool ShifterOperandCanAlwaysHold(uint32_t immediate) OVERRIDE;
+
 
   static bool IsInstructionForExceptionHandling(uintptr_t pc);
 
diff --git a/test/Android.run-test.mk b/test/Android.run-test.mk
index 537873f..ad64b68 100644
--- a/test/Android.run-test.mk
+++ b/test/Android.run-test.mk
@@ -214,11 +214,22 @@
   055-enum-performance \
   133-static-invoke-super
 
- # disable timing sensitive tests on "dist" builds.
+# Tests that require python3.
+TEST_ART_PYTHON3_DEPENDENCY_RUN_TESTS := \
+  960-default-smali \
+  961-default-iface-resolution-generated \
+  964-default-iface-init-generated \
+
+# disable timing sensitive tests on "dist" builds.
 ifdef dist_goal
   ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(RUN_TYPES),$(PREBUILD_TYPES), \
         $(COMPILER_TYPES),$(RELOCATE_TYPES),$(TRACE_TYPES),$(GC_TYPES),$(JNI_TYPES), \
         $(IMAGE_TYPES), $(PICTEST_TYPES), $(DEBUGGABLE_TYPES), $(TEST_ART_TIMING_SENSITIVE_RUN_TESTS), $(ALL_ADDRESS_SIZES))
+
+  # Currently disable tests requiring python3.
+  ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(RUN_TYPES),$(PREBUILD_TYPES), \
+        $(COMPILER_TYPES),$(RELOCATE_TYPES),$(TRACE_TYPES),$(GC_TYPES),$(JNI_TYPES), \
+        $(IMAGE_TYPES), $(PICTEST_TYPES), $(DEBUGGABLE_TYPES), $(TEST_ART_PYTHON3_DEPENDENCY_RUN_TESTS), $(ALL_ADDRESS_SIZES))
 endif
 
 TEST_ART_TIMING_SENSITIVE_RUN_TESTS :=