ARM: VIXL32: Implement ArrayGet, ArraySet, BoundsCheck etc.

Over 100 more ART tests now pass.

Test: export ART_USE_VIXL_ARM_BACKEND=true && \
      mma test-art-host dist && \
      mma test-art-target dist

Change-Id: I8b7c2e29886981d72057d36347bca0626aabfe81
diff --git a/compiler/optimizing/code_generator_arm_vixl.cc b/compiler/optimizing/code_generator_arm_vixl.cc
index f1d1135..cc40522 100644
--- a/compiler/optimizing/code_generator_arm_vixl.cc
+++ b/compiler/optimizing/code_generator_arm_vixl.cc
@@ -39,10 +39,10 @@
 
 using helpers::DRegisterFrom;
 using helpers::DWARFReg;
-using helpers::FromLowSToD;
 using helpers::HighDRegisterFrom;
 using helpers::HighRegisterFrom;
 using helpers::InputOperandAt;
+using helpers::InputRegister;
 using helpers::InputRegisterAt;
 using helpers::InputSRegisterAt;
 using helpers::InputVRegisterAt;
@@ -340,6 +340,46 @@
   DISALLOW_COPY_AND_ASSIGN(SuspendCheckSlowPathARMVIXL);
 };
 
+class BoundsCheckSlowPathARMVIXL : public SlowPathCodeARMVIXL {
+ public:
+  explicit BoundsCheckSlowPathARMVIXL(HBoundsCheck* instruction)
+      : SlowPathCodeARMVIXL(instruction) {}
+
+  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+    CodeGeneratorARMVIXL* arm_codegen = down_cast<CodeGeneratorARMVIXL*>(codegen);
+    LocationSummary* locations = instruction_->GetLocations();
+
+    __ Bind(GetEntryLabel());
+    if (instruction_->CanThrowIntoCatchBlock()) {
+      // Live registers will be restored in the catch block if caught.
+      SaveLiveRegisters(codegen, instruction_->GetLocations());
+    }
+    // We're moving two locations to locations that could overlap, so we need a parallel
+    // move resolver.
+    InvokeRuntimeCallingConventionARMVIXL calling_convention;
+    codegen->EmitParallelMoves(
+        locations->InAt(0),
+        LocationFrom(calling_convention.GetRegisterAt(0)),
+        Primitive::kPrimInt,
+        locations->InAt(1),
+        LocationFrom(calling_convention.GetRegisterAt(1)),
+        Primitive::kPrimInt);
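+    // A String.charAt bounds failure throws StringIndexOutOfBoundsException,
+    // while an array access failure throws ArrayIndexOutOfBoundsException,
+    // hence the two entrypoints below.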
+    QuickEntrypointEnum entrypoint = instruction_->AsBoundsCheck()->IsStringCharAt()
+        ? kQuickThrowStringBounds
+        : kQuickThrowArrayBounds;
+    arm_codegen->InvokeRuntime(entrypoint, instruction_, instruction_->GetDexPc(), this);
+    CheckEntrypointTypes<kQuickThrowStringBounds, void, int32_t, int32_t>();
+    CheckEntrypointTypes<kQuickThrowArrayBounds, void, int32_t, int32_t>();
+  }
+
+  bool IsFatal() const OVERRIDE { return true; }
+
+  const char* GetDescription() const OVERRIDE { return "BoundsCheckSlowPathARMVIXL"; }
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(BoundsCheckSlowPathARMVIXL);
+};
+
 class LoadClassSlowPathARMVIXL : public SlowPathCodeARMVIXL {
  public:
   LoadClassSlowPathARMVIXL(HLoadClass* cls, HInstruction* at, uint32_t dex_pc, bool do_clinit)
@@ -394,6 +434,66 @@
   DISALLOW_COPY_AND_ASSIGN(LoadClassSlowPathARMVIXL);
 };
 
+class DeoptimizationSlowPathARMVIXL : public SlowPathCodeARMVIXL {
+ public:
+  explicit DeoptimizationSlowPathARMVIXL(HDeoptimize* instruction)
+      : SlowPathCodeARMVIXL(instruction) {}
+
+  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+    CodeGeneratorARMVIXL* arm_codegen = down_cast<CodeGeneratorARMVIXL*>(codegen);
+    __ Bind(GetEntryLabel());
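+    // Deoptimization never returns to compiled code: the runtime entrypoint
+    // unwinds into the interpreter, so no registers are saved or restored here.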
+    arm_codegen->InvokeRuntime(kQuickDeoptimize, instruction_, instruction_->GetDexPc(), this);
+    CheckEntrypointTypes<kQuickDeoptimize, void, void>();
+  }
+
+  const char* GetDescription() const OVERRIDE { return "DeoptimizationSlowPathARMVIXL"; }
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(DeoptimizationSlowPathARMVIXL);
+};
+
+class ArraySetSlowPathARMVIXL : public SlowPathCodeARMVIXL {
+ public:
+  explicit ArraySetSlowPathARMVIXL(HInstruction* instruction) : SlowPathCodeARMVIXL(instruction) {}
+
+  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+    LocationSummary* locations = instruction_->GetLocations();
+    __ Bind(GetEntryLabel());
+    SaveLiveRegisters(codegen, locations);
+
+    InvokeRuntimeCallingConventionARMVIXL calling_convention;
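+    // Marshal (array, index, value) into the first three argument registers;
+    // the sources may overlap the destinations, so use a parallel move.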
+    HParallelMove parallel_move(codegen->GetGraph()->GetArena());
+    parallel_move.AddMove(
+        locations->InAt(0),
+        LocationFrom(calling_convention.GetRegisterAt(0)),
+        Primitive::kPrimNot,
+        nullptr);
+    parallel_move.AddMove(
+        locations->InAt(1),
+        LocationFrom(calling_convention.GetRegisterAt(1)),
+        Primitive::kPrimInt,
+        nullptr);
+    parallel_move.AddMove(
+        locations->InAt(2),
+        LocationFrom(calling_convention.GetRegisterAt(2)),
+        Primitive::kPrimNot,
+        nullptr);
+    codegen->GetMoveResolver()->EmitNativeCode(&parallel_move);
+
+    CodeGeneratorARMVIXL* arm_codegen = down_cast<CodeGeneratorARMVIXL*>(codegen);
+    arm_codegen->InvokeRuntime(kQuickAputObject, instruction_, instruction_->GetDexPc(), this);
+    CheckEntrypointTypes<kQuickAputObject, void, mirror::Array*, int32_t, mirror::Object*>();
+    RestoreLiveRegisters(codegen, locations);
+    __ B(GetExitLabel());
+  }
+
+  const char* GetDescription() const OVERRIDE { return "ArraySetSlowPathARMVIXL"; }
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(ArraySetSlowPathARMVIXL);
+};
+
 inline vixl32::Condition ARMCondition(IfCondition cond) {
   switch (cond) {
     case kCondEQ: return eq;
@@ -795,14 +895,14 @@
       __ Vcmp(F32, InputSRegisterAt(instruction, 0), 0.0);
     } else {
       DCHECK_EQ(type, Primitive::kPrimDouble);
-      __ Vcmp(F64, FromLowSToD(LowSRegisterFrom(lhs_loc)), 0.0);
+      __ Vcmp(F64, DRegisterFrom(lhs_loc), 0.0);
     }
   } else {
     if (type == Primitive::kPrimFloat) {
       __ Vcmp(InputSRegisterAt(instruction, 0), InputSRegisterAt(instruction, 1));
     } else {
       DCHECK_EQ(type, Primitive::kPrimDouble);
-      __ Vcmp(FromLowSToD(LowSRegisterFrom(lhs_loc)), FromLowSToD(LowSRegisterFrom(rhs_loc)));
+      __ Vcmp(DRegisterFrom(lhs_loc), DRegisterFrom(rhs_loc));
     }
   }
 }
@@ -1028,6 +1128,24 @@
   GenerateTestAndBranch(if_instr, /* condition_input_index */ 0, true_target, false_target);
 }
 
+void LocationsBuilderARMVIXL::VisitDeoptimize(HDeoptimize* deoptimize) {
+  LocationSummary* locations = new (GetGraph()->GetArena())
+      LocationSummary(deoptimize, LocationSummary::kCallOnSlowPath);
+  locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty());  // No caller-save registers.
+  if (IsBooleanValueOrMaterializedCondition(deoptimize->InputAt(0))) {
+    locations->SetInAt(0, Location::RequiresRegister());
+  }
+}
+
+void InstructionCodeGeneratorARMVIXL::VisitDeoptimize(HDeoptimize* deoptimize) {
+  SlowPathCodeARMVIXL* slow_path =
+      deopt_slow_paths_.NewSlowPath<DeoptimizationSlowPathARMVIXL>(deoptimize);
+  GenerateTestAndBranch(deoptimize,
+                        /* condition_input_index */ 0,
+                        slow_path->GetEntryLabel(),
+                        /* false_target */ nullptr);
+}
+
 void LocationsBuilderARMVIXL::VisitSelect(HSelect* select) {
   LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(select);
   if (Primitive::IsFloatingPointType(select->GetType())) {
@@ -1250,7 +1368,8 @@
   locations->SetOut(Location::ConstantLocation(constant));
 }
 
-void InstructionCodeGeneratorARMVIXL::VisitFloatConstant(HFloatConstant* constant ATTRIBUTE_UNUSED) {
+void InstructionCodeGeneratorARMVIXL::VisitFloatConstant(
+    HFloatConstant* constant ATTRIBUTE_UNUSED) {
   // Will be generated at use site.
 }
 
@@ -1260,7 +1379,8 @@
   locations->SetOut(Location::ConstantLocation(constant));
 }
 
-void InstructionCodeGeneratorARMVIXL::VisitDoubleConstant(HDoubleConstant* constant ATTRIBUTE_UNUSED) {
+void InstructionCodeGeneratorARMVIXL::VisitDoubleConstant(
+    HDoubleConstant* constant ATTRIBUTE_UNUSED) {
   // Will be generated at use site.
 }
 
@@ -1693,7 +1813,7 @@
         case Primitive::kPrimDouble: {
           // Processing a Dex `double-to-int' instruction.
           vixl32::SRegister temp_s = LowSRegisterFrom(locations->GetTemp(0));
-          __ Vcvt(I32, F64, temp_s, FromLowSToD(LowSRegisterFrom(in)));
+          __ Vcvt(I32, F64, temp_s, DRegisterFrom(in));
           __ Vmov(OutputRegister(conversion), temp_s);
           break;
         }
@@ -1781,7 +1901,7 @@
 
         case Primitive::kPrimDouble:
           // Processing a Dex `double-to-float' instruction.
-          __ Vcvt(F32, F64, OutputSRegister(conversion), FromLowSToD(LowSRegisterFrom(in)));
+          __ Vcvt(F32, F64, OutputSRegister(conversion), DRegisterFrom(in));
           break;
 
         default:
@@ -1800,7 +1920,7 @@
         case Primitive::kPrimChar: {
           // Processing a Dex `int-to-double' instruction.
           __ Vmov(LowSRegisterFrom(out), InputRegisterAt(conversion, 0));
-          __ Vcvt(F64, I32, FromLowSToD(LowSRegisterFrom(out)), LowSRegisterFrom(out));
+          __ Vcvt(F64, I32, DRegisterFrom(out), LowSRegisterFrom(out));
           break;
         }
 
@@ -1810,13 +1930,12 @@
           vixl32::Register high = HighRegisterFrom(in);
 
           vixl32::SRegister out_s = LowSRegisterFrom(out);
-          vixl32::DRegister out_d = FromLowSToD(out_s);
+          vixl32::DRegister out_d = DRegisterFrom(out);
 
           vixl32::SRegister temp_s = LowSRegisterFrom(locations->GetTemp(0));
-          vixl32::DRegister temp_d = FromLowSToD(temp_s);
+          vixl32::DRegister temp_d = DRegisterFrom(locations->GetTemp(0));
 
-          vixl32::SRegister constant_s = LowSRegisterFrom(locations->GetTemp(1));
-          vixl32::DRegister constant_d = FromLowSToD(constant_s);
+          vixl32::DRegister constant_d = DRegisterFrom(locations->GetTemp(1));
 
           // temp_d = int-to-double(high)
           __ Vmov(temp_s, high);
@@ -1833,7 +1952,7 @@
 
         case Primitive::kPrimFloat:
           // Processing a Dex `float-to-double' instruction.
-          __ Vcvt(F64, F32, FromLowSToD(LowSRegisterFrom(out)), InputSRegisterAt(conversion, 0));
+          __ Vcvt(F64, F32, DRegisterFrom(out), InputSRegisterAt(conversion, 0));
           break;
 
         default:
@@ -2782,6 +2901,17 @@
   }
 }
 
+void LocationsBuilderARMVIXL::VisitBooleanNot(HBooleanNot* bool_not) {
+  LocationSummary* locations =
+      new (GetGraph()->GetArena()) LocationSummary(bool_not, LocationSummary::kNoCall);
+  locations->SetInAt(0, Location::RequiresRegister());
+  locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+}
+
+void InstructionCodeGeneratorARMVIXL::VisitBooleanNot(HBooleanNot* bool_not) {
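+  // Booleans are materialized as 0 or 1, so XOR-ing with 1 negates the value.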
+  __ Eor(OutputRegister(bool_not), InputRegister(bool_not), 1);
+}
+
 void LocationsBuilderARMVIXL::VisitCompare(HCompare* compare) {
   LocationSummary* locations =
       new (GetGraph()->GetArena()) LocationSummary(compare, LocationSummary::kNoCall);
@@ -3053,7 +3183,7 @@
     }
 
     case Primitive::kPrimDouble: {
-      vixl32::DRegister value_reg = FromLowSToD(LowSRegisterFrom(value));
+      vixl32::DRegister value_reg = DRegisterFrom(value);
       if (is_volatile && !atomic_ldrd_strd) {
         vixl32::Register value_reg_lo = RegisterFrom(locations->GetTemp(0));
         vixl32::Register value_reg_hi = RegisterFrom(locations->GetTemp(1));
@@ -3281,7 +3411,7 @@
       break;
 
     case Primitive::kPrimDouble: {
-      vixl32::DRegister out_dreg = FromLowSToD(LowSRegisterFrom(out));
+      vixl32::DRegister out_dreg = DRegisterFrom(out);
       if (is_volatile && !atomic_ldrd_strd) {
         vixl32::Register lo = RegisterFrom(locations->GetTemp(0));
         vixl32::Register hi = RegisterFrom(locations->GetTemp(1));
@@ -3345,6 +3475,14 @@
   HandleFieldGet(instruction, instruction->GetFieldInfo());
 }
 
+void LocationsBuilderARMVIXL::VisitStaticFieldSet(HStaticFieldSet* instruction) {
+  HandleFieldSet(instruction, instruction->GetFieldInfo());
+}
+
+void InstructionCodeGeneratorARMVIXL::VisitStaticFieldSet(HStaticFieldSet* instruction) {
+  HandleFieldSet(instruction, instruction->GetFieldInfo(), instruction->GetValueCanBeNull());
+}
+
 void LocationsBuilderARMVIXL::VisitNullCheck(HNullCheck* instruction) {
   // TODO(VIXL): https://android-review.googlesource.com/#/c/275337/
   LocationSummary::CallKind call_kind = instruction->CanThrowIntoCatchBlock()
@@ -3381,6 +3519,554 @@
   codegen_->GenerateNullCheck(instruction);
 }
 
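+// Maps a primitive type to the width- and signedness-aware load variant
+// understood by the assembler's LoadFromOffset.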
+static LoadOperandType GetLoadOperandType(Primitive::Type type) {
+  switch (type) {
+    case Primitive::kPrimNot:
+      return kLoadWord;
+    case Primitive::kPrimBoolean:
+      return kLoadUnsignedByte;
+    case Primitive::kPrimByte:
+      return kLoadSignedByte;
+    case Primitive::kPrimChar:
+      return kLoadUnsignedHalfword;
+    case Primitive::kPrimShort:
+      return kLoadSignedHalfword;
+    case Primitive::kPrimInt:
+      return kLoadWord;
+    case Primitive::kPrimLong:
+      return kLoadWordPair;
+    case Primitive::kPrimFloat:
+      return kLoadSWord;
+    case Primitive::kPrimDouble:
+      return kLoadDWord;
+    default:
+      LOG(FATAL) << "Unreachable type " << type;
+      UNREACHABLE();
+  }
+}
+
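+// Maps a primitive type to the store variant understood by the assembler's
+// StoreToOffset; stores need no sign distinction, so byte/short pairs collapse.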
+static StoreOperandType GetStoreOperandType(Primitive::Type type) {
+  switch (type) {
+    case Primitive::kPrimNot:
+      return kStoreWord;
+    case Primitive::kPrimBoolean:
+    case Primitive::kPrimByte:
+      return kStoreByte;
+    case Primitive::kPrimChar:
+    case Primitive::kPrimShort:
+      return kStoreHalfword;
+    case Primitive::kPrimInt:
+      return kStoreWord;
+    case Primitive::kPrimLong:
+      return kStoreWordPair;
+    case Primitive::kPrimFloat:
+      return kStoreSWord;
+    case Primitive::kPrimDouble:
+      return kStoreDWord;
+    default:
+      LOG(FATAL) << "Unreachable type " << type;
+      UNREACHABLE();
+  }
+}
+
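+// Loads `out_loc` from [base + (reg_index << ComponentSizeShift(type))], for
+// the access widths that T32 can encode with a shifted register offset.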
+void CodeGeneratorARMVIXL::LoadFromShiftedRegOffset(Primitive::Type type,
+                                                    Location out_loc,
+                                                    vixl32::Register base,
+                                                    vixl32::Register reg_index,
+                                                    vixl32::Condition cond) {
+  uint32_t shift_count = Primitive::ComponentSizeShift(type);
+  MemOperand mem_address(base, reg_index, vixl32::LSL, shift_count);
+
+  switch (type) {
+    case Primitive::kPrimByte:
+      __ Ldrsb(cond, RegisterFrom(out_loc), mem_address);
+      break;
+    case Primitive::kPrimBoolean:
+      __ Ldrb(cond, RegisterFrom(out_loc), mem_address);
+      break;
+    case Primitive::kPrimShort:
+      __ Ldrsh(cond, RegisterFrom(out_loc), mem_address);
+      break;
+    case Primitive::kPrimChar:
+      __ Ldrh(cond, RegisterFrom(out_loc), mem_address);
+      break;
+    case Primitive::kPrimNot:
+    case Primitive::kPrimInt:
+      __ Ldr(cond, RegisterFrom(out_loc), mem_address);
+      break;
+    // T32 has no shifted-register addressing mode for 64-bit and FP loads.
+    case Primitive::kPrimLong:
+    case Primitive::kPrimFloat:
+    case Primitive::kPrimDouble:
+    default:
+      LOG(FATAL) << "Unreachable type " << type;
+      UNREACHABLE();
+  }
+}
+
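+// Stores `loc` to [base + (reg_index << ComponentSizeShift(type))]; the
+// mirror image of LoadFromShiftedRegOffset above.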
+void CodeGeneratorARMVIXL::StoreToShiftedRegOffset(Primitive::Type type,
+                                                   Location loc,
+                                                   vixl32::Register base,
+                                                   vixl32::Register reg_index,
+                                                   vixl32::Condition cond) {
+  uint32_t shift_count = Primitive::ComponentSizeShift(type);
+  MemOperand mem_address(base, reg_index, vixl32::LSL, shift_count);
+
+  switch (type) {
+    case Primitive::kPrimByte:
+    case Primitive::kPrimBoolean:
+      __ Strb(cond, RegisterFrom(loc), mem_address);
+      break;
+    case Primitive::kPrimShort:
+    case Primitive::kPrimChar:
+      __ Strh(cond, RegisterFrom(loc), mem_address);
+      break;
+    case Primitive::kPrimNot:
+    case Primitive::kPrimInt:
+      __ Str(cond, RegisterFrom(loc), mem_address);
+      break;
+    // T32 has no shifted-register addressing mode for 64-bit and FP stores.
+    case Primitive::kPrimLong:
+    case Primitive::kPrimFloat:
+    case Primitive::kPrimDouble:
+    default:
+      LOG(FATAL) << "Unreachable type " << type;
+      UNREACHABLE();
+  }
+}
+
+void LocationsBuilderARMVIXL::VisitArrayGet(HArrayGet* instruction) {
+  bool object_array_get_with_read_barrier =
+      kEmitCompilerReadBarrier && (instruction->GetType() == Primitive::kPrimNot);
+  LocationSummary* locations =
+      new (GetGraph()->GetArena()) LocationSummary(instruction,
+                                                   object_array_get_with_read_barrier ?
+                                                       LocationSummary::kCallOnSlowPath :
+                                                       LocationSummary::kNoCall);
+  if (object_array_get_with_read_barrier && kUseBakerReadBarrier) {
+    TODO_VIXL32(FATAL);
+  }
+  locations->SetInAt(0, Location::RequiresRegister());
+  locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
+  if (Primitive::IsFloatingPointType(instruction->GetType())) {
+    locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
+  } else {
+    // The output overlaps in the case of an object array get with
+    // read barriers enabled: we do not want the move to overwrite the
+    // array's location, as we need it to emit the read barrier.
+    locations->SetOut(
+        Location::RequiresRegister(),
+        object_array_get_with_read_barrier ? Location::kOutputOverlap : Location::kNoOutputOverlap);
+  }
+  // We need a temporary register for the read barrier marking slow
+  // path in CodeGeneratorARM::GenerateArrayLoadWithBakerReadBarrier.
+  // It is also needed for the String compression feature.
+  if ((object_array_get_with_read_barrier && kUseBakerReadBarrier) ||
+      (mirror::kUseStringCompression && instruction->IsStringCharAt())) {
+    TODO_VIXL32(FATAL);
+  }
+}
+
+void InstructionCodeGeneratorARMVIXL::VisitArrayGet(HArrayGet* instruction) {
+  UseScratchRegisterScope temps(GetAssembler()->GetVIXLAssembler());
+  LocationSummary* locations = instruction->GetLocations();
+  Location obj_loc = locations->InAt(0);
+  vixl32::Register obj = InputRegisterAt(instruction, 0);
+  Location index = locations->InAt(1);
+  Location out_loc = locations->Out();
+  uint32_t data_offset = CodeGenerator::GetArrayDataOffset(instruction);
+  Primitive::Type type = instruction->GetType();
+  const bool maybe_compressed_char_at = mirror::kUseStringCompression &&
+                                        instruction->IsStringCharAt();
+  HInstruction* array_instr = instruction->GetArray();
+  bool has_intermediate_address = array_instr->IsIntermediateAddress();
+  // The read barrier instrumentation does not support the HIntermediateAddress instruction yet.
+  DCHECK(!(has_intermediate_address && kEmitCompilerReadBarrier));
+
+  switch (type) {
+    case Primitive::kPrimBoolean:
+    case Primitive::kPrimByte:
+    case Primitive::kPrimShort:
+    case Primitive::kPrimChar:
+    case Primitive::kPrimInt: {
+      if (index.IsConstant()) {
+        int32_t const_index = index.GetConstant()->AsIntConstant()->GetValue();
+        if (maybe_compressed_char_at) {
+          TODO_VIXL32(FATAL);
+        } else {
+          uint32_t full_offset = data_offset + (const_index << Primitive::ComponentSizeShift(type));
+
+          LoadOperandType load_type = GetLoadOperandType(type);
+          GetAssembler()->LoadFromOffset(load_type, RegisterFrom(out_loc), obj, full_offset);
+        }
+      } else {
+        vixl32::Register temp = temps.Acquire();
+
+        if (has_intermediate_address) {
+          TODO_VIXL32(FATAL);
+        } else {
+          __ Add(temp, obj, data_offset);
+        }
+        if (maybe_compressed_char_at) {
+          TODO_VIXL32(FATAL);
+        } else {
+          codegen_->LoadFromShiftedRegOffset(type, out_loc, temp, RegisterFrom(index));
+        }
+      }
+      break;
+    }
+
+    case Primitive::kPrimNot: {
+      static_assert(
+          sizeof(mirror::HeapReference<mirror::Object>) == sizeof(int32_t),
+          "art::mirror::HeapReference<art::mirror::Object> and int32_t have different sizes.");
+      // /* HeapReference<Object> */ out =
+      //     *(obj + data_offset + index * sizeof(HeapReference<Object>))
+      if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
+        TODO_VIXL32(FATAL);
+      } else {
+        vixl32::Register out = OutputRegister(instruction);
+        if (index.IsConstant()) {
+          size_t offset =
+              (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
+          GetAssembler()->LoadFromOffset(kLoadWord, out, obj, offset);
+          codegen_->MaybeRecordImplicitNullCheck(instruction);
+          // If read barriers are enabled, emit read barriers other than
+          // Baker's using a slow path (and also unpoison the loaded
+          // reference, if heap poisoning is enabled).
+          codegen_->MaybeGenerateReadBarrierSlow(instruction, out_loc, out_loc, obj_loc, offset);
+        } else {
+          vixl32::Register temp = temps.Acquire();
+
+          if (has_intermediate_address) {
+            TODO_VIXL32(FATAL);
+          } else {
+            __ Add(temp, obj, data_offset);
+          }
+          codegen_->LoadFromShiftedRegOffset(type, out_loc, temp, RegisterFrom(index));
+
+          codegen_->MaybeRecordImplicitNullCheck(instruction);
+          // If read barriers are enabled, emit read barriers other than
+          // Baker's using a slow path (and also unpoison the loaded
+          // reference, if heap poisoning is enabled).
+          codegen_->MaybeGenerateReadBarrierSlow(
+              instruction, out_loc, out_loc, obj_loc, data_offset, index);
+        }
+      }
+      break;
+    }
+
+    case Primitive::kPrimLong: {
+      if (index.IsConstant()) {
+        size_t offset =
+            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
+        GetAssembler()->LoadFromOffset(kLoadWordPair, LowRegisterFrom(out_loc), obj, offset);
+      } else {
+        vixl32::Register temp = temps.Acquire();
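+        // LDRD has no register-offset form in T32, so fold the scaled index
+        // into the base register first.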
+        __ Add(temp, obj, Operand(RegisterFrom(index), vixl32::LSL, TIMES_8));
+        GetAssembler()->LoadFromOffset(kLoadWordPair, LowRegisterFrom(out_loc), temp, data_offset);
+      }
+      break;
+    }
+
+    case Primitive::kPrimFloat: {
+      vixl32::SRegister out = SRegisterFrom(out_loc);
+      if (index.IsConstant()) {
+        size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
+        GetAssembler()->LoadSFromOffset(out, obj, offset);
+      } else {
+        vixl32::Register temp = temps.Acquire();
+        __ Add(temp, obj, Operand(RegisterFrom(index), vixl32::LSL, TIMES_4));
+        GetAssembler()->LoadSFromOffset(out, temp, data_offset);
+      }
+      break;
+    }
+
+    case Primitive::kPrimDouble: {
+      if (index.IsConstant()) {
+        size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
+        GetAssembler()->LoadDFromOffset(DRegisterFrom(out_loc), obj, offset);
+      } else {
+        vixl32::Register temp = temps.Acquire();
+        __ Add(temp, obj, Operand(RegisterFrom(index), vixl32::LSL, TIMES_8));
+        GetAssembler()->LoadDFromOffset(DRegisterFrom(out_loc), temp, data_offset);
+      }
+      break;
+    }
+
+    case Primitive::kPrimVoid:
+      LOG(FATAL) << "Unreachable type " << type;
+      UNREACHABLE();
+  }
+
+  if (type == Primitive::kPrimNot) {
+    // Potential implicit null checks, in the case of reference
+    // arrays, are handled in the previous switch statement.
+  } else if (!maybe_compressed_char_at) {
+    codegen_->MaybeRecordImplicitNullCheck(instruction);
+  }
+}
+
+void LocationsBuilderARMVIXL::VisitArraySet(HArraySet* instruction) {
+  Primitive::Type value_type = instruction->GetComponentType();
+
+  bool needs_write_barrier =
+      CodeGenerator::StoreNeedsWriteBarrier(value_type, instruction->GetValue());
+  bool may_need_runtime_call_for_type_check = instruction->NeedsTypeCheck();
+
+  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(
+      instruction,
+      may_need_runtime_call_for_type_check ?
+          LocationSummary::kCallOnSlowPath :
+          LocationSummary::kNoCall);
+
+  locations->SetInAt(0, Location::RequiresRegister());
+  locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
+  if (Primitive::IsFloatingPointType(value_type)) {
+    locations->SetInAt(2, Location::RequiresFpuRegister());
+  } else {
+    locations->SetInAt(2, Location::RequiresRegister());
+  }
+  if (needs_write_barrier) {
+    // Temporary registers for the write barrier.
+    locations->AddTemp(Location::RequiresRegister());  // Possibly used for ref. poisoning too.
+    locations->AddTemp(Location::RequiresRegister());
+  }
+}
+
+void InstructionCodeGeneratorARMVIXL::VisitArraySet(HArraySet* instruction) {
+  UseScratchRegisterScope temps(GetAssembler()->GetVIXLAssembler());
+  LocationSummary* locations = instruction->GetLocations();
+  vixl32::Register array = InputRegisterAt(instruction, 0);
+  Location index = locations->InAt(1);
+  Primitive::Type value_type = instruction->GetComponentType();
+  bool may_need_runtime_call_for_type_check = instruction->NeedsTypeCheck();
+  bool needs_write_barrier =
+      CodeGenerator::StoreNeedsWriteBarrier(value_type, instruction->GetValue());
+  uint32_t data_offset =
+      mirror::Array::DataOffset(Primitive::ComponentSize(value_type)).Uint32Value();
+  Location value_loc = locations->InAt(2);
+  HInstruction* array_instr = instruction->GetArray();
+  bool has_intermediate_address = array_instr->IsIntermediateAddress();
+  // The read barrier instrumentation does not support the HIntermediateAddress instruction yet.
+  DCHECK(!(has_intermediate_address && kEmitCompilerReadBarrier));
+
+  switch (value_type) {
+    case Primitive::kPrimBoolean:
+    case Primitive::kPrimByte:
+    case Primitive::kPrimShort:
+    case Primitive::kPrimChar:
+    case Primitive::kPrimInt: {
+      if (index.IsConstant()) {
+        int32_t const_index = index.GetConstant()->AsIntConstant()->GetValue();
+        uint32_t full_offset =
+            data_offset + (const_index << Primitive::ComponentSizeShift(value_type));
+        StoreOperandType store_type = GetStoreOperandType(value_type);
+        GetAssembler()->StoreToOffset(store_type, RegisterFrom(value_loc), array, full_offset);
+      } else {
+        vixl32::Register temp = temps.Acquire();
+
+        if (has_intermediate_address) {
+          TODO_VIXL32(FATAL);
+        } else {
+          __ Add(temp, array, data_offset);
+        }
+        codegen_->StoreToShiftedRegOffset(value_type, value_loc, temp, RegisterFrom(index));
+      }
+      break;
+    }
+
+    case Primitive::kPrimNot: {
+      vixl32::Register value = RegisterFrom(value_loc);
+      // TryExtractArrayAccessAddress optimization is never applied for non-primitive ArraySet.
+      // See the comment in instruction_simplifier_shared.cc.
+      DCHECK(!has_intermediate_address);
+
+      if (instruction->InputAt(2)->IsNullConstant()) {
+        // Just setting null.
+        if (index.IsConstant()) {
+          size_t offset =
+              (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
+          GetAssembler()->StoreToOffset(kStoreWord, value, array, offset);
+        } else {
+          DCHECK(index.IsRegister()) << index;
+          vixl32::Register temp = temps.Acquire();
+          __ Add(temp, array, data_offset);
+          codegen_->StoreToShiftedRegOffset(value_type, value_loc, temp, RegisterFrom(index));
+        }
+        codegen_->MaybeRecordImplicitNullCheck(instruction);
+        DCHECK(!needs_write_barrier);
+        DCHECK(!may_need_runtime_call_for_type_check);
+        break;
+      }
+
+      DCHECK(needs_write_barrier);
+      Location temp1_loc = locations->GetTemp(0);
+      vixl32::Register temp1 = RegisterFrom(temp1_loc);
+      Location temp2_loc = locations->GetTemp(1);
+      vixl32::Register temp2 = RegisterFrom(temp2_loc);
+      uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
+      uint32_t super_offset = mirror::Class::SuperClassOffset().Int32Value();
+      uint32_t component_offset = mirror::Class::ComponentTypeOffset().Int32Value();
+      vixl32::Label done;
+      SlowPathCodeARMVIXL* slow_path = nullptr;
+
+      if (may_need_runtime_call_for_type_check) {
+        slow_path = new (GetGraph()->GetArena()) ArraySetSlowPathARMVIXL(instruction);
+        codegen_->AddSlowPath(slow_path);
+        if (instruction->GetValueCanBeNull()) {
+          vixl32::Label non_zero;
+          __ Cbnz(value, &non_zero);
+          if (index.IsConstant()) {
+            size_t offset =
+                (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
+            GetAssembler()->StoreToOffset(kStoreWord, value, array, offset);
+          } else {
+            DCHECK(index.IsRegister()) << index;
+            vixl32::Register temp = temps.Acquire();
+            __ Add(temp, array, data_offset);
+            codegen_->StoreToShiftedRegOffset(value_type, value_loc, temp, RegisterFrom(index));
+          }
+          codegen_->MaybeRecordImplicitNullCheck(instruction);
+          __ B(&done);
+          __ Bind(&non_zero);
+        }
+
+        // Note that when read barriers are enabled, the type checks
+        // are performed without read barriers.  This is fine, even in
+        // the case where a class object is in the from-space after
+        // the flip, as a comparison involving such a type would not
+        // produce a false positive; it may of course produce a false
+        // negative, in which case we would take the ArraySet slow
+        // path.
+
+        // /* HeapReference<Class> */ temp1 = array->klass_
+        GetAssembler()->LoadFromOffset(kLoadWord, temp1, array, class_offset);
+        codegen_->MaybeRecordImplicitNullCheck(instruction);
+        GetAssembler()->MaybeUnpoisonHeapReference(temp1);
+
+        // /* HeapReference<Class> */ temp1 = temp1->component_type_
+        GetAssembler()->LoadFromOffset(kLoadWord, temp1, temp1, component_offset);
+        // /* HeapReference<Class> */ temp2 = value->klass_
+        GetAssembler()->LoadFromOffset(kLoadWord, temp2, value, class_offset);
+        // If heap poisoning is enabled, no need to unpoison `temp1`
+        // nor `temp2`, as we are comparing two poisoned references.
+        __ Cmp(temp1, temp2);
+
+        if (instruction->StaticTypeOfArrayIsObjectArray()) {
+          vixl32::Label do_put;
+          __ B(eq, &do_put);
+          // If heap poisoning is enabled, the `temp1` reference has
+          // not been unpoisoned yet; unpoison it now.
+          GetAssembler()->MaybeUnpoisonHeapReference(temp1);
+
+          // /* HeapReference<Class> */ temp1 = temp1->super_class_
+          GetAssembler()->LoadFromOffset(kLoadWord, temp1, temp1, super_offset);
+          // If heap poisoning is enabled, no need to unpoison
+          // `temp1`, as we are comparing against null below.
+          __ Cbnz(temp1, slow_path->GetEntryLabel());
+          __ Bind(&do_put);
+        } else {
+          __ B(ne, slow_path->GetEntryLabel());
+        }
+      }
+
+      vixl32::Register source = value;
+      if (kPoisonHeapReferences) {
+        // Note that in the case where `value` is a null reference,
+        // we do not enter this block, as a null reference does not
+        // need poisoning.
+        DCHECK_EQ(value_type, Primitive::kPrimNot);
+        __ Mov(temp1, value);
+        GetAssembler()->PoisonHeapReference(temp1);
+        source = temp1;
+      }
+
+      if (index.IsConstant()) {
+        size_t offset =
+            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
+        GetAssembler()->StoreToOffset(kStoreWord, source, array, offset);
+      } else {
+        DCHECK(index.IsRegister()) << index;
+
+        vixl32::Register temp = temps.Acquire();
+        __ Add(temp, array, data_offset);
+        codegen_->StoreToShiftedRegOffset(value_type,
+                                          LocationFrom(source),
+                                          temp,
+                                          RegisterFrom(index));
+      }
+
+      if (!may_need_runtime_call_for_type_check) {
+        codegen_->MaybeRecordImplicitNullCheck(instruction);
+      }
+
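+      // Record the reference store in the card table for the garbage collector.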
+      codegen_->MarkGCCard(temp1, temp2, array, value, instruction->GetValueCanBeNull());
+
+      if (done.IsReferenced()) {
+        __ Bind(&done);
+      }
+
+      if (slow_path != nullptr) {
+        __ Bind(slow_path->GetExitLabel());
+      }
+
+      break;
+    }
+
+    case Primitive::kPrimLong: {
+      Location value = locations->InAt(2);
+      if (index.IsConstant()) {
+        size_t offset =
+            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
+        GetAssembler()->StoreToOffset(kStoreWordPair, LowRegisterFrom(value), array, offset);
+      } else {
+        vixl32::Register temp = temps.Acquire();
+        __ Add(temp, array, Operand(RegisterFrom(index), vixl32::LSL, TIMES_8));
+        GetAssembler()->StoreToOffset(kStoreWordPair, LowRegisterFrom(value), temp, data_offset);
+      }
+      break;
+    }
+
+    case Primitive::kPrimFloat: {
+      Location value = locations->InAt(2);
+      DCHECK(value.IsFpuRegister());
+      if (index.IsConstant()) {
+        size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
+        GetAssembler()->StoreSToOffset(SRegisterFrom(value), array, offset);
+      } else {
+        vixl32::Register temp = temps.Acquire();
+        __ Add(temp, array, Operand(RegisterFrom(index), vixl32::LSL, TIMES_4));
+        GetAssembler()->StoreSToOffset(SRegisterFrom(value), temp, data_offset);
+      }
+      break;
+    }
+
+    case Primitive::kPrimDouble: {
+      Location value = locations->InAt(2);
+      DCHECK(value.IsFpuRegisterPair());
+      if (index.IsConstant()) {
+        size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
+        GetAssembler()->StoreDToOffset(DRegisterFrom(value), array, offset);
+      } else {
+        vixl32::Register temp = temps.Acquire();
+        __ Add(temp, array, Operand(RegisterFrom(index), vixl32::LSL, TIMES_8));
+        GetAssembler()->StoreDToOffset(DRegisterFrom(value), temp, data_offset);
+      }
+      break;
+    }
+
+    case Primitive::kPrimVoid:
+      LOG(FATAL) << "Unreachable type " << value_type;
+      UNREACHABLE();
+  }
+
+  // Objects are handled in the switch.
+  if (value_type != Primitive::kPrimNot) {
+    codegen_->MaybeRecordImplicitNullCheck(instruction);
+  }
+}
+
 void LocationsBuilderARMVIXL::VisitArrayLength(HArrayLength* instruction) {
   LocationSummary* locations =
       new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
@@ -3397,6 +4083,28 @@
   // TODO(VIXL): https://android-review.googlesource.com/#/c/272625/
 }
 
+void LocationsBuilderARMVIXL::VisitBoundsCheck(HBoundsCheck* instruction) {
+  RegisterSet caller_saves = RegisterSet::Empty();
+  InvokeRuntimeCallingConventionARMVIXL calling_convention;
+  caller_saves.Add(LocationFrom(calling_convention.GetRegisterAt(0)));
+  caller_saves.Add(LocationFrom(calling_convention.GetRegisterAt(1)));
+  LocationSummary* locations = codegen_->CreateThrowingSlowPathLocations(instruction, caller_saves);
+  locations->SetInAt(0, Location::RequiresRegister());
+  locations->SetInAt(1, Location::RequiresRegister());
+}
+
+void InstructionCodeGeneratorARMVIXL::VisitBoundsCheck(HBoundsCheck* instruction) {
+  SlowPathCodeARMVIXL* slow_path =
+      new (GetGraph()->GetArena()) BoundsCheckSlowPathARMVIXL(instruction);
+  codegen_->AddSlowPath(slow_path);
+
+  vixl32::Register index = InputRegisterAt(instruction, 0);
+  vixl32::Register length = InputRegisterAt(instruction, 1);
+
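+  // An unsigned comparison also rejects negative indices, which wrap around
+  // to large unsigned values.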
+  __ Cmp(index, length);
+  __ B(hs, slow_path->GetEntryLabel());
+}
+
 void CodeGeneratorARMVIXL::MarkGCCard(vixl32::Register temp,
                                       vixl32::Register card,
                                       vixl32::Register object,
@@ -3509,7 +4217,7 @@
     }
   } else if (source.IsFpuRegister()) {
     if (destination.IsRegister()) {
-      TODO_VIXL32(FATAL);
+      __ Vmov(RegisterFrom(destination), SRegisterFrom(source));
     } else if (destination.IsFpuRegister()) {
       __ Vmov(SRegisterFrom(destination), SRegisterFrom(source));
     } else {
@@ -3534,9 +4242,7 @@
       __ Mov(LowRegisterFrom(destination), LowRegisterFrom(source));
       __ Mov(HighRegisterFrom(destination), HighRegisterFrom(source));
     } else if (destination.IsFpuRegisterPair()) {
-      __ Vmov(FromLowSToD(LowSRegisterFrom(destination)),
-              LowRegisterFrom(source),
-              HighRegisterFrom(source));
+      __ Vmov(DRegisterFrom(destination), LowRegisterFrom(source), HighRegisterFrom(source));
     } else {
       DCHECK(destination.IsDoubleStackSlot()) << destination;
       DCHECK(ExpectedPairLayout(source));
@@ -3547,7 +4253,7 @@
     }
   } else if (source.IsFpuRegisterPair()) {
     if (destination.IsRegisterPair()) {
-      TODO_VIXL32(FATAL);
+      __ Vmov(LowRegisterFrom(destination), HighRegisterFrom(destination), DRegisterFrom(source));
     } else if (destination.IsFpuRegisterPair()) {
       __ Vmov(DRegisterFrom(destination), DRegisterFrom(source));
     } else {
@@ -3586,7 +4292,7 @@
     } else if (constant->IsDoubleConstant()) {
       double value = constant->AsDoubleConstant()->GetValue();
       if (destination.IsFpuRegisterPair()) {
-        __ Vmov(FromLowSToD(LowSRegisterFrom(destination)), value);
+        __ Vmov(DRegisterFrom(destination), value);
       } else {
         DCHECK(destination.IsDoubleStackSlot()) << destination;
         uint64_t int_value = bit_cast<uint64_t, double>(value);
diff --git a/compiler/optimizing/code_generator_arm_vixl.h b/compiler/optimizing/code_generator_arm_vixl.h
index 02bf960..df7d467 100644
--- a/compiler/optimizing/code_generator_arm_vixl.h
+++ b/compiler/optimizing/code_generator_arm_vixl.h
@@ -106,13 +106,18 @@
   M(AboveOrEqual)                               \
   M(Add)                                        \
   M(And)                                        \
+  M(ArrayGet)                                   \
   M(ArrayLength)                                \
+  M(ArraySet)                                   \
   M(Below)                                      \
   M(BelowOrEqual)                               \
+  M(BooleanNot)                                 \
+  M(BoundsCheck)                                \
   M(ClearException)                             \
   M(ClinitCheck)                                \
   M(Compare)                                    \
   M(CurrentMethod)                              \
+  M(Deoptimize)                                 \
   M(Div)                                        \
   M(DivZeroCheck)                               \
   M(DoubleConstant)                             \
@@ -154,6 +159,7 @@
   M(Shl)                                        \
   M(Shr)                                        \
   M(StaticFieldGet)                             \
+  M(StaticFieldSet)                             \
   M(Sub)                                        \
   M(SuspendCheck)                               \
   M(Throw)                                      \
@@ -164,14 +170,9 @@
 
 // TODO: Remove once the VIXL32 backend is implemented completely.
 #define FOR_EACH_UNIMPLEMENTED_INSTRUCTION(M)   \
-  M(ArrayGet)                                   \
-  M(ArraySet)                                   \
-  M(BooleanNot)                                 \
-  M(BoundsCheck)                                \
   M(BoundType)                                  \
   M(CheckCast)                                  \
   M(ClassTableGet)                              \
-  M(Deoptimize)                                 \
   M(InstanceOf)                                 \
   M(InvokeInterface)                            \
   M(InvokeUnresolved)                           \
@@ -179,7 +180,6 @@
   M(NativeDebugInfo)                            \
   M(PackedSwitch)                               \
   M(Rem)                                        \
-  M(StaticFieldSet)                             \
   M(UnresolvedInstanceFieldGet)                 \
   M(UnresolvedInstanceFieldSet)                 \
   M(UnresolvedStaticFieldGet)                   \
@@ -438,6 +438,17 @@
   // Helper method to move a 32-bit value between two locations.
   void Move32(Location destination, Location source);
 
+  void LoadFromShiftedRegOffset(Primitive::Type type,
+                                Location out_loc,
+                                vixl::aarch32::Register base,
+                                vixl::aarch32::Register reg_index,
+                                vixl::aarch32::Condition cond = vixl::aarch32::al);
+  void StoreToShiftedRegOffset(Primitive::Type type,
+                               Location loc,
+                               vixl::aarch32::Register base,
+                               vixl::aarch32::Register reg_index,
+                               vixl::aarch32::Condition cond = vixl::aarch32::al);
+
   const ArmInstructionSetFeatures& GetInstructionSetFeatures() const { return isa_features_; }
 
   vixl::aarch32::Label* GetFrameEntryLabel() { return &frame_entry_label_; }
diff --git a/compiler/optimizing/common_arm.h b/compiler/optimizing/common_arm.h
index 5d92bfd..8c08a9c 100644
--- a/compiler/optimizing/common_arm.h
+++ b/compiler/optimizing/common_arm.h
@@ -37,11 +37,6 @@
   return dwarf::Reg::ArmFp(static_cast<int>(reg.GetCode()));
 }
 
-inline vixl::aarch32::DRegister FromLowSToD(vixl::aarch32::SRegister reg) {
-  DCHECK_EQ(reg.GetCode() % 2, 0u) << reg;
-  return vixl::aarch32::DRegister(reg.GetCode() / 2);
-}
-
 inline vixl::aarch32::Register HighRegisterFrom(Location location) {
   DCHECK(location.IsRegisterPair()) << location;
   return vixl::aarch32::Register(location.AsRegisterPairHigh<vixl32::Register>());
@@ -135,6 +130,11 @@
                       instr->InputAt(input_index)->GetType());
 }
 
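+// Convenience wrapper for instructions with a single core-register input.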
+inline vixl::aarch32::Register InputRegister(HInstruction* instr) {
+  DCHECK_EQ(instr->InputCount(), 1u);
+  return InputRegisterAt(instr, 0);
+}
+
 inline int64_t Int64ConstantFrom(Location location) {
   HConstant* instr = location.GetConstant();
   if (instr->IsIntConstant()) {
diff --git a/test/Android.arm_vixl.mk b/test/Android.arm_vixl.mk
index 5f969e3..2d9708d 100644
--- a/test/Android.arm_vixl.mk
+++ b/test/Android.arm_vixl.mk
@@ -18,33 +18,23 @@
 TEST_ART_BROKEN_OPTIMIZING_ARM_VIXL_RUN_TESTS := \
   002-sleep \
   003-omnibus-opcodes \
-  004-checker-UnsafeTest18 \
   004-InterfaceTest \
   004-JniTest \
   004-NativeAllocations \
-  004-ReferenceMap \
-  004-SignalTest \
-  004-StackWalk \
   004-ThreadStress \
   004-UnsafeTest \
+  004-checker-UnsafeTest18 \
   005-annotations \
-  006-args \
-  008-exceptions \
   009-instanceof \
-  011-array-copy \
   012-math \
   015-switch \
-  019-wrong-array-type \
-  020-string \
   021-string2 \
   022-interface \
   023-many-interfaces \
   024-illegal-access \
   025-access-controller \
-  027-arithmetic \
   028-array-write \
   031-class-attributes \
-  032-concrete-sub \
   035-enum \
   036-finalizer \
   037-inherit \
@@ -54,7 +44,6 @@
   046-reflect \
   047-returns \
   048-reflect-v8 \
-  049-show-object \
   050-sync-test \
   051-thread \
   052-verifier-fun \
@@ -62,7 +51,6 @@
   054-uncaught \
   055-enum-performance \
   058-enum-order \
-  059-finalizer-throw \
   061-out-of-memory \
   062-character-encodings \
   063-process-manager \
@@ -72,31 +60,21 @@
   067-preemptive-unpark \
   068-classloader \
   069-field-type \
-  070-nio-buffer \
   071-dexfile \
-  072-precise-gc \
   074-gc-thrash \
   075-verification-error \
-  076-boolean-put \
   079-phantom \
   080-oom-throw \
-  080-oom-throw-with-finalizer \
-  081-hot-exceptions \
   082-inline-execute \
   083-compiler-regressions \
   086-null-super \
   087-gc-after-link \
   088-monitor-verification \
-  090-loop-formation \
   091-override-package-private-method \
   093-serialization \
-  094-pattern \
   096-array-copy-concurrent-gc \
   098-ddmc \
   099-vmdebug \
-  100-reflect2 \
-  101-fibonacci \
-  102-concurrent-gc \
   103-string-append \
   104-growth-limit \
   106-exceptions2 \
@@ -105,8 +83,6 @@
   109-suspend-check \
   113-multidex \
   114-ParallelGC \
-  117-nopatchoat \
-  119-noimage-patchoat \
   120-hashcode \
   121-modifiers \
   122-npe \
@@ -114,7 +90,6 @@
   123-inline-execute2 \
   127-checker-secondarydex \
   129-ThreadGetId \
-  131-structural-change \
   132-daemon-locks-shutdown \
   133-static-invoke-super \
   134-reg-promotion \
@@ -122,33 +97,24 @@
   136-daemon-jni-shutdown \
   137-cfi \
   138-duplicate-classes-check2 \
-  139-register-natives \
   140-field-packing \
   141-class-unload \
   142-classloader2 \
   144-static-field-sigquit \
-  145-alloc-tracking-stress \
   146-bad-interface \
   150-loadlibrary \
   151-OpenFileLimit \
   201-built-in-except-detail-messages \
   304-method-tracing \
-  407-arrays \
-  410-floats \
-  411-optimizing-arith-mul \
   412-new-array \
-  413-regalloc-regression \
-  414-static-fields \
   416-optimizing-arith-not \
   417-optimizing-arith-div \
-  421-exceptions \
   422-instanceof \
   422-type-conversion \
   423-invoke-interface \
   424-checkcast \
   425-invoke-super \
   426-monitor \
-  427-bounds \
   428-optimizing-arith-rem \
   430-live-register-slow-path \
   431-type-propagation \
@@ -156,9 +122,7 @@
   434-invoke-direct \
   436-rem-float \
   437-inline \
-  438-volatile \
   439-npe \
-  441-checker-inliner \
   442-checker-constant-folding \
   444-checker-nce \
   445-checker-licm \
@@ -166,83 +130,56 @@
   448-multiple-returns \
   449-checker-bce \
   450-checker-types \
-  451-regression-add-float \
-  451-spill-splot \
   452-multiple-returns2 \
   453-not-byte \
-  454-get-vreg \
-  456-baseline-array-set \
-  457-regs \
   458-checker-instruct-simplification \
   458-long-to-fpu \
-  459-dead-phi \
   460-multiple-returns3 \
-  461-get-reference-vreg \
   463-checker-boolean-simplifier \
-  466-get-live-vreg \
   467-regalloc-pair \
   468-checker-bool-simplif-regression \
-  469-condition-materialization \
-  471-deopt-environment \
-  472-type-propagation \
-  474-checker-boolean-input \
   475-regression-inliner-ids \
   477-checker-bound-type \
   478-checker-clinit-check-pruning \
   483-dce-block \
-  484-checker-register-hints \
   485-checker-dce-switch \
   486-checker-must-do-null-check \
   488-checker-inline-recursive-calls \
   490-checker-inline \
-  491-current-method \
   492-checker-inline-invoke-interface \
   493-checker-inline-invoke-interface \
   494-checker-instanceof-tests \
   495-checker-checkcast-tests \
   496-checker-inlining-class-loader \
   497-inlining-and-class-loader \
-  498-type-propagation \
-  499-bce-phi-array-length \
   500-instanceof \
-  501-null-constant-dce \
   501-regression-packed-switch \
-  503-dead-instructions \
   504-regression-baseline-entry \
   508-checker-disassembly \
   510-checker-try-catch \
-  513-array-deopt \
   515-dce-dominator \
   517-checker-builder-fallthrough \
   518-null-array-get \
   519-bound-load-class \
   520-equivalent-phi \
-  521-checker-array-set-null \
-  521-regression-integer-field-set \
   522-checker-regression-monitor-exit \
   523-checker-can-throw-regression \
   525-checker-arrays-fields1 \
   525-checker-arrays-fields2 \
   526-checker-caller-callee-regs \
-  526-long-regalloc \
   527-checker-array-access-split \
   528-long-hint \
   529-checker-unresolved \
-  529-long-split \
   530-checker-loops1 \
   530-checker-loops2 \
-  530-checker-loops3 \
   530-checker-lse \
   530-checker-regression-reftyp-final \
   530-instanceof-checkcast \
-  532-checker-nonnull-arrayset \
   534-checker-bce-deoptimization \
-  535-deopt-and-inlining \
   535-regression-const-val \
   536-checker-intrinsic-optimization \
   536-checker-needs-access-check \
   537-checker-inline-and-unverified \
-  537-checker-jump-over-jump \
   538-checker-embed-constants \
   540-checker-rtp-bug \
   541-regression-inlined-deopt \
@@ -253,9 +190,8 @@
   546-regression-simplify-catch \
   550-checker-multiply-accumulate \
   550-checker-regression-wide-store \
-  551-checker-shifter-operand \
   551-invoke-super \
   552-checker-sharpening \
   552-checker-primitive-typeprop \
   552-invoke-non-existent-super \
   553-invoke-super \
@@ -263,87 +199,51 @@
   555-UnsafeGetLong-regression \
   556-invoke-super \
   558-switch \
-  559-bce-ssa \
-  559-checker-irreducible-loop \
-  559-checker-rtp-ifnotnull \
   560-packed-switch \
   561-divrem \
-  561-shared-slowpaths \
   562-bce-preheader \
-  562-no-intermediate \
   563-checker-fakestring \
-  564-checker-irreducible-loop \
   564-checker-negbitwise \
-  565-checker-doublenegbitwise \
-  565-checker-irreducible-loop \
   566-polymorphic-inlining \
-  568-checker-onebit \
   570-checker-osr \
   570-checker-select \
-  571-irreducible-loop \
-  572-checker-array-get-regression \
   573-checker-checkcast-regression \
-  574-irreducible-and-constant-area \
-  575-checker-isnan \
   575-checker-string-init-alias \
-  577-checker-fp2int \
-  578-bce-visit \
   580-checker-round \
-  580-checker-string-fact-intrinsics \
-  581-rtp \
-  582-checker-bce-length \
   584-checker-div-bool \
   586-checker-null-array-get \
   587-inline-class-error \
   588-checker-irreducib-lifetime-hole \
   589-super-imt \
-  590-checker-arr-set-null-regression \
-  591-new-instance-string \
   592-checker-regression-bool-input \
-  593-checker-long-2-float-regression \
-  593-checker-shift-and-simplifier \
   594-checker-array-alias \
   594-invoke-super \
   594-load-string-regression \
-  595-error-class \
-  596-checker-dead-phi \
   597-deopt-new-string \
-  599-checker-irreducible-loop \
   600-verifier-fails \
   601-method-access \
   602-deoptimizeable \
   603-checker-instanceof \
-  604-hot-static-interface \
   605-new-string-from-bytes \
   608-checker-unresolved-lse \
   609-checker-inline-interface \
-  609-checker-x86-bounds-check \
   610-arraycopy \
-  611-checker-simplify-if \
   612-jit-dex-cache \
-  613-inlining-dex-cache \
-  614-checker-dump-constant-location \
-  615-checker-arm64-store-zero \
-  617-clinit-oome \
-  618-checker-induction \
-  621-checker-new-instance \
   700-LoadArgRegs \
   701-easy-div-rem \
   702-LargeBranchOffset \
-  704-multiply-accumulate \
   800-smali \
   802-deoptimization \
   960-default-smali \
-  961-default-iface-resolution-gen \
   963-default-range-smali \
   965-default-verify \
   966-default-conflict \
   967-default-ame \
-  968-default-partial-compile-gen \
   969-iface-super \
   971-iface-super \
   972-default-imt-collision \
   972-iface-super-multidex \
   973-default-multidex \
   974-verify-interface-super \
-  975-iface-private
+  975-iface-private \
+  979-invoke-polymorphic-accessors