Merge "Fix imgdiag build rules."
diff --git a/cmdline/cmdline.h b/cmdline/cmdline.h
index 2e9f208..4aced5b 100644
--- a/cmdline/cmdline.h
+++ b/cmdline/cmdline.h
@@ -196,6 +196,7 @@
         "  --boot-image=<file.art>: provide the image location for the boot class path.\n"
         "      Do not include the arch as part of the name, it is added automatically.\n"
         "      Example: --boot-image=/system/framework/boot.art\n"
+        "               (specifies /system/framework/<arch>/boot.art as the image file)\n"
         "\n";
     usage += StringPrintf(  // Optional.
         "  --instruction-set=(arm|arm64|mips|mips64|x86|x86_64): for locating the image\n"
diff --git a/compiler/driver/compiler_driver-inl.h b/compiler/driver/compiler_driver-inl.h
index 10841e6..0eb3e43 100644
--- a/compiler/driver/compiler_driver-inl.h
+++ b/compiler/driver/compiler_driver-inl.h
@@ -264,18 +264,16 @@
     Handle<mirror::ClassLoader> class_loader, const DexCompilationUnit* mUnit,
     uint32_t method_idx, InvokeType invoke_type, bool check_incompatible_class_change) {
   DCHECK_EQ(class_loader.Get(), soa.Decode<mirror::ClassLoader*>(mUnit->GetClassLoader()));
-  ArtMethod* resolved_method = mUnit->GetClassLinker()->ResolveMethod(
-      *dex_cache->GetDexFile(), method_idx, dex_cache, class_loader, nullptr, invoke_type);
-  DCHECK_EQ(resolved_method == nullptr, soa.Self()->IsExceptionPending());
+  ArtMethod* resolved_method =
+      check_incompatible_class_change
+          ? mUnit->GetClassLinker()->ResolveMethod<ClassLinker::kForceICCECheck>(
+              *dex_cache->GetDexFile(), method_idx, dex_cache, class_loader, nullptr, invoke_type)
+          : mUnit->GetClassLinker()->ResolveMethod<ClassLinker::kNoICCECheckForCache>(
+              *dex_cache->GetDexFile(), method_idx, dex_cache, class_loader, nullptr, invoke_type);
   if (UNLIKELY(resolved_method == nullptr)) {
+    DCHECK(soa.Self()->IsExceptionPending());
     // Clean up any exception left by type resolution.
     soa.Self()->ClearException();
-    return nullptr;
-  }
-  if (check_incompatible_class_change &&
-      UNLIKELY(resolved_method->CheckIncompatibleClassChange(invoke_type))) {
-    // Silently return null on incompatible class change.
-    return nullptr;
   }
   return resolved_method;
 }
@@ -361,7 +359,7 @@
     ArtMethod* called_method;
     ClassLinker* class_linker = mUnit->GetClassLinker();
     if (LIKELY(devirt_target->dex_file == mUnit->GetDexFile())) {
-      called_method = class_linker->ResolveMethod(
+      called_method = class_linker->ResolveMethod<ClassLinker::kNoICCECheckForCache>(
           *devirt_target->dex_file, devirt_target->dex_method_index, dex_cache, class_loader,
           nullptr, kVirtual);
     } else {
@@ -369,7 +367,7 @@
       auto target_dex_cache(hs.NewHandle(class_linker->RegisterDexFile(
           *devirt_target->dex_file,
           class_linker->GetOrCreateAllocatorForClassLoader(class_loader.Get()))));
-      called_method = class_linker->ResolveMethod(
+      called_method = class_linker->ResolveMethod<ClassLinker::kNoICCECheckForCache>(
           *devirt_target->dex_file, devirt_target->dex_method_index, target_dex_cache,
           class_loader, nullptr, kVirtual);
     }
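
The hunks above turn the runtime `check_incompatible_class_change` flag into a template argument on `ClassLinker::ResolveMethod`, so each call site selects the ICCE-checking or cache-friendly variant at compile time. A minimal sketch of this bool-to-template dispatch pattern, with illustrative names standing in for ART's `kForceICCECheck`/`kNoICCECheckForCache` modes:

```cpp
#include <iostream>

// Illustrative stand-in for ART's resolution modes (the real enumerators are
// ClassLinker::kNoICCECheckForCache and ClassLinker::kForceICCECheck).
enum ResolveMode { kNoCheck, kForceCheck };

// The mode is a template argument, so the kNoCheck instantiation contains no
// incompatible-class-change branch at all and its result is safe to cache.
template <ResolveMode kMode>
const char* Resolve(const char* candidate, bool incompatible_change) {
  if (kMode == kForceCheck && incompatible_change) {
    return nullptr;  // Silently return null on incompatible class change.
  }
  return candidate;
}

// A runtime flag selects one of the two instantiations exactly once, mirroring
// the ternary introduced in CompilerDriver::ResolveMethod above.
const char* ResolveWithFlag(const char* candidate, bool incompatible, bool check_icce) {
  return check_icce ? Resolve<kForceCheck>(candidate, incompatible)
                    : Resolve<kNoCheck>(candidate, incompatible);
}

int main() {
  std::cout << (ResolveWithFlag("m", true, true) == nullptr) << "\n";   // 1: check fired.
  std::cout << (ResolveWithFlag("m", true, false) != nullptr) << "\n";  // 1: check skipped.
}
```
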
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index 9d3af16..a05105b 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -1902,7 +1902,7 @@
       }
       if (resolve_fields_and_methods) {
         while (it.HasNextDirectMethod()) {
-          ArtMethod* method = class_linker->ResolveMethod(
+          ArtMethod* method = class_linker->ResolveMethod<ClassLinker::kNoICCECheckForCache>(
               dex_file, it.GetMemberIndex(), dex_cache, class_loader, nullptr,
               it.GetMethodInvokeType(class_def));
           if (method == nullptr) {
@@ -1911,7 +1911,7 @@
           it.Next();
         }
         while (it.HasNextVirtualMethod()) {
-          ArtMethod* method = class_linker->ResolveMethod(
+          ArtMethod* method = class_linker->ResolveMethod<ClassLinker::kNoICCECheckForCache>(
               dex_file, it.GetMemberIndex(), dex_cache, class_loader, nullptr,
               it.GetMethodInvokeType(class_def));
           if (method == nullptr) {
diff --git a/compiler/elf_writer_debug.cc b/compiler/elf_writer_debug.cc
index 9dc6565..7696b94 100644
--- a/compiler/elf_writer_debug.cc
+++ b/compiler/elf_writer_debug.cc
@@ -293,12 +293,130 @@
   }
 }
 
-struct CompilationUnit {
-  std::vector<const MethodDebugInfo*> methods_;
-  size_t debug_line_offset_ = 0;
-  uint32_t low_pc_ = 0xFFFFFFFFU;
-  uint32_t high_pc_ = 0;
-};
+namespace {
+  struct CompilationUnit {
+    std::vector<const MethodDebugInfo*> methods_;
+    size_t debug_line_offset_ = 0;
+    uint32_t low_pc_ = 0xFFFFFFFFU;
+    uint32_t high_pc_ = 0;
+  };
+
+  struct LocalVariable {
+    uint16_t vreg;
+    uint32_t dex_pc_low;
+    uint32_t dex_pc_high;
+    const char* name;
+    const char* type;
+    const char* sig;
+  };
+
+  struct DebugInfoCallback {
+    static void NewLocal(void* ctx,
+                         uint16_t vreg,
+                         uint32_t start,
+                         uint32_t end,
+                         const char* name,
+                         const char* type,
+                         const char* sig) {
+      auto* context = static_cast<DebugInfoCallback*>(ctx);
+      if (name != nullptr && type != nullptr) {
+        context->local_variables_.push_back({vreg, start, end, name, type, sig});
+      }
+    }
+    std::vector<LocalVariable> local_variables_;
+  };
+
+  std::vector<const char*> GetParamNames(const MethodDebugInfo* mi) {
+    std::vector<const char*> names;
+    const uint8_t* stream = mi->dex_file_->GetDebugInfoStream(mi->code_item_);
+    if (stream != nullptr) {
+      DecodeUnsignedLeb128(&stream);  // line.
+      uint32_t parameters_size = DecodeUnsignedLeb128(&stream);
+      for (uint32_t i = 0; i < parameters_size; ++i) {
+        uint32_t id = DecodeUnsignedLeb128P1(&stream);
+        names.push_back(mi->dex_file_->StringDataByIdx(id));
+      }
+    }
+    return names;
+  }
+
+  struct VariableLocation {
+    uint32_t low_pc;
+    uint32_t high_pc;
+    DexRegisterLocation reg_lo;  // May be None if the location is unknown.
+    DexRegisterLocation reg_hi;  // Most significant bits of 64-bit value.
+  };
+
+  // Get the location of a given dex register (e.g. stack or machine register).
+  // Note that the location might be different based on the current pc.
+  // The result will cover all ranges where the variable is in scope.
+  std::vector<VariableLocation> GetVariableLocations(const MethodDebugInfo* method_info,
+                                                     uint16_t vreg,
+                                                     bool is64bitValue,
+                                                     uint32_t dex_pc_low,
+                                                     uint32_t dex_pc_high) {
+    std::vector<VariableLocation> variable_locations;
+
+    // Get stack maps sorted by pc (they might not be sorted internally).
+    const CodeInfo code_info(method_info->compiled_method_->GetVmapTable().data());
+    const StackMapEncoding encoding = code_info.ExtractEncoding();
+    std::map<uint32_t, StackMap> stack_maps;
+    for (uint32_t s = 0; s < code_info.GetNumberOfStackMaps(); s++) {
+      StackMap stack_map = code_info.GetStackMapAt(s, encoding);
+      DCHECK(stack_map.IsValid());
+      const uint32_t low_pc = method_info->low_pc_ + stack_map.GetNativePcOffset(encoding);
+      DCHECK_LE(low_pc, method_info->high_pc_);
+      stack_maps.emplace(low_pc, stack_map);
+    }
+
+    // Create entries for the requested register based on stack map data.
+    for (auto it = stack_maps.begin(); it != stack_maps.end(); it++) {
+      const StackMap& stack_map = it->second;
+      const uint32_t low_pc = it->first;
+      auto next_it = it;
+      next_it++;
+      const uint32_t high_pc = next_it != stack_maps.end() ? next_it->first
+                                                           : method_info->high_pc_;
+      DCHECK_LE(low_pc, high_pc);
+      if (low_pc == high_pc) {
+        continue;  // Ignore if the address range is empty.
+      }
+
+      // Check that the stack map is in the requested range.
+      uint32_t dex_pc = stack_map.GetDexPc(encoding);
+      if (!(dex_pc_low <= dex_pc && dex_pc < dex_pc_high)) {
+        continue;
+      }
+
+      // Find the location of the dex register.
+      DexRegisterLocation reg_lo = DexRegisterLocation::None();
+      DexRegisterLocation reg_hi = DexRegisterLocation::None();
+      if (stack_map.HasDexRegisterMap(encoding)) {
+        DexRegisterMap dex_register_map = code_info.GetDexRegisterMapOf(
+            stack_map, encoding, method_info->code_item_->registers_size_);
+        reg_lo = dex_register_map.GetDexRegisterLocation(
+            vreg, method_info->code_item_->registers_size_, code_info, encoding);
+        if (is64bitValue) {
+          reg_hi = dex_register_map.GetDexRegisterLocation(
+              vreg + 1, method_info->code_item_->registers_size_, code_info, encoding);
+        }
+      }
+
+      // Add location entry for this address range.
+      if (!variable_locations.empty() &&
+          variable_locations.back().reg_lo == reg_lo &&
+          variable_locations.back().reg_hi == reg_hi &&
+          variable_locations.back().high_pc == low_pc) {
+        // Merge with the previous entry (extend its range).
+        variable_locations.back().high_pc = high_pc;
+      } else {
+        variable_locations.push_back({low_pc, high_pc, reg_lo, reg_hi});
+      }
+    }
+
+    return variable_locations;
+  }
+}  // namespace
 
 // Helper class to write .debug_info and its supporting sections.
 template<typename ElfTypes>
@@ -332,6 +450,7 @@
         const DexFile::ProtoId& dex_proto = dex->GetMethodPrototype(dex_method);
         const DexFile::TypeList* dex_params = dex->GetProtoParameters(dex_proto);
         const char* dex_class_desc = dex->GetMethodDeclaringClassDescriptor(dex_method);
+        const bool is_static = (mi->access_flags_ & kAccStatic) != 0;
 
         // Enclose the method in correct class definition.
         if (last_dex_class_desc != dex_class_desc) {
@@ -346,17 +465,17 @@
           last_dex_class_desc = dex_class_desc;
         }
 
+        // Collect information about local variables and parameters.
+        DebugInfoCallback debug_info_callback;
         std::vector<const char*> param_names;
         if (mi->code_item_ != nullptr) {
-          const uint8_t* stream = dex->GetDebugInfoStream(mi->code_item_);
-          if (stream != nullptr) {
-            DecodeUnsignedLeb128(&stream);  // line.
-            uint32_t parameters_size = DecodeUnsignedLeb128(&stream);
-            for (uint32_t i = 0; i < parameters_size; ++i) {
-              uint32_t id = DecodeUnsignedLeb128P1(&stream);
-              param_names.push_back(mi->dex_file_->StringDataByIdx(id));
-            }
-          }
+          dex->DecodeDebugInfo(mi->code_item_,
+                               is_static,
+                               mi->dex_method_index_,
+                               nullptr,
+                               DebugInfoCallback::NewLocal,
+                               &debug_info_callback);
+          param_names = GetParamNames(mi);
         }
 
         int start_depth = info_.Depth();
@@ -367,19 +486,19 @@
         uint8_t frame_base[] = { DW_OP_call_frame_cfa };
         info_.WriteExprLoc(DW_AT_frame_base, &frame_base, sizeof(frame_base));
         WriteLazyType(dex->GetReturnTypeDescriptor(dex_proto));
+        uint32_t vreg = mi->code_item_ == nullptr ? 0 :
+            mi->code_item_->registers_size_ - mi->code_item_->ins_size_;
+        if (!is_static) {
+          info_.StartTag(DW_TAG_formal_parameter);
+          WriteName("this");
+          info_.WriteFlag(DW_AT_artificial, true);
+          WriteLazyType(dex_class_desc);
+          const bool is64bitValue = false;
+          WriteRegLocation(mi, vreg, is64bitValue, compilation_unit.low_pc_);
+          vreg++;
+          info_.EndTag();
+        }
         if (dex_params != nullptr) {
-          uint32_t vreg = mi->code_item_ == nullptr ? 0 :
-              mi->code_item_->registers_size_ - mi->code_item_->ins_size_;
-          if ((mi->access_flags_ & kAccStatic) == 0) {
-            info_.StartTag(DW_TAG_formal_parameter);
-            WriteName("this");
-            info_.WriteFlag(DW_AT_artificial, true);
-            WriteLazyType(dex_class_desc);
-            const bool is64bitValue = false;
-            WriteRegLocation(mi, vreg, is64bitValue, compilation_unit.low_pc_);
-            vreg++;
-            info_.EndTag();
-          }
           for (uint32_t i = 0; i < dex_params->Size(); ++i) {
             info_.StartTag(DW_TAG_formal_parameter);
             // Parameter names may not always be available.
@@ -399,6 +518,18 @@
             CHECK_EQ(vreg, mi->code_item_->registers_size_);
           }
         }
+        for (const LocalVariable& var : debug_info_callback.local_variables_) {
+          const uint32_t first_arg = mi->code_item_->registers_size_ - mi->code_item_->ins_size_;
+          if (var.vreg < first_arg) {
+            info_.StartTag(DW_TAG_variable);
+            WriteName(var.name);
+            WriteLazyType(var.type);
+            bool is64bitValue = var.type[0] == 'D' || var.type[0] == 'J';
+            WriteRegLocation(mi, var.vreg, is64bitValue, compilation_unit.low_pc_,
+                             var.dex_pc_low, var.dex_pc_high);
+            info_.EndTag();
+          }
+        }
         info_.EndTag();
         CHECK_EQ(info_.Depth(), start_depth);  // Balanced start/end.
       }
@@ -420,8 +551,12 @@
     // Write a table into .debug_loc which describes the location of a dex register.
     // The dex register might be valid only at some points, and it might
     // move between machine registers and the stack.
-    void WriteRegLocation(const MethodDebugInfo* method_info, uint16_t vreg,
-                          bool is64bitValue, uint32_t compilation_unit_low_pc) {
+    void WriteRegLocation(const MethodDebugInfo* method_info,
+                          uint16_t vreg,
+                          bool is64bitValue,
+                          uint32_t compilation_unit_low_pc,
+                          uint32_t dex_pc_low = 0,
+                          uint32_t dex_pc_high = 0xFFFFFFFF) {
       using Kind = DexRegisterLocation::Kind;
       bool is_optimizing = method_info->compiled_method_->GetQuickCode().size() > 0 &&
                            method_info->compiled_method_->GetVmapTable().size() > 0 &&
@@ -431,46 +566,29 @@
         return;
       }
 
-      Writer<> writer(&owner_->debug_loc_);
-      info_.WriteSecOffset(DW_AT_location, writer.size());
+      Writer<> debug_loc(&owner_->debug_loc_);
+      Writer<> debug_ranges(&owner_->debug_ranges_);
+      info_.WriteSecOffset(DW_AT_location, debug_loc.size());
+      info_.WriteSecOffset(DW_AT_start_scope, debug_ranges.size());
 
+      std::vector<VariableLocation> variable_locations = GetVariableLocations(
+          method_info,
+          vreg,
+          is64bitValue,
+          dex_pc_low,
+          dex_pc_high);
+
+      // Write .debug_loc entries.
       const InstructionSet isa = owner_->builder_->GetIsa();
       const bool is64bit = Is64BitInstructionSet(isa);
-      const CodeInfo code_info(method_info->compiled_method_->GetVmapTable().data());
-      const StackMapEncoding encoding = code_info.ExtractEncoding();
-      DexRegisterLocation last_reg_lo = DexRegisterLocation::None();
-      DexRegisterLocation last_reg_hi = DexRegisterLocation::None();
-      size_t offset_of_last_end_address = 0;
-      for (uint32_t s = 0; s < code_info.GetNumberOfStackMaps(); s++) {
-        StackMap stack_map = code_info.GetStackMapAt(s, encoding);
-        DCHECK(stack_map.IsValid());
-
-        // Find the location of the dex register.
-        DexRegisterLocation reg_lo = DexRegisterLocation::None();
-        DexRegisterLocation reg_hi = DexRegisterLocation::None();
-        if (stack_map.HasDexRegisterMap(encoding)) {
-          DexRegisterMap dex_register_map = code_info.GetDexRegisterMapOf(
-              stack_map, encoding, method_info->code_item_->registers_size_);
-          reg_lo = dex_register_map.GetDexRegisterLocation(
-              vreg, method_info->code_item_->registers_size_, code_info, encoding);
-          if (is64bitValue) {
-            reg_hi = dex_register_map.GetDexRegisterLocation(
-                vreg + 1, method_info->code_item_->registers_size_, code_info, encoding);
-          }
-        }
-        if ((reg_lo == last_reg_lo && reg_hi == last_reg_hi) ||
-            reg_lo.GetKind() == Kind::kNone) {
-          // Skip identical or undefined locations.
-          continue;
-        }
-        last_reg_lo = reg_lo;
-        last_reg_hi = reg_hi;
-
+      for (const VariableLocation& variable_location : variable_locations) {
         // Translate dex register location to DWARF expression.
         // Note that 64-bit value might be split to two distinct locations.
         // (for example, two 32-bit machine registers, or even stack and register)
         uint8_t buffer[64];
         uint8_t* pos = buffer;
+        DexRegisterLocation reg_lo = variable_location.reg_lo;
+        DexRegisterLocation reg_hi = variable_location.reg_hi;
         for (int piece = 0; piece < (is64bitValue ? 2 : 1); piece++) {
           DexRegisterLocation reg_loc = (piece == 0 ? reg_lo : reg_hi);
           const Kind kind = reg_loc.GetKind();
@@ -529,43 +647,56 @@
           }
         }
 
-        // Write end address for previous entry.
-        const uint32_t pc = method_info->low_pc_ + stack_map.GetNativePcOffset(encoding);
-        if (offset_of_last_end_address != 0) {
-          if (is64bit) {
-            writer.UpdateUint64(offset_of_last_end_address, pc - compilation_unit_low_pc);
-          } else {
-            writer.UpdateUint32(offset_of_last_end_address, pc - compilation_unit_low_pc);
-          }
-        }
-        offset_of_last_end_address = 0;
-
-        DCHECK_LE(static_cast<size_t>(pos - buffer), sizeof(buffer));
+        // Check that the buffer is large enough; keep half of it empty for safety.
+        DCHECK_LE(static_cast<size_t>(pos - buffer), sizeof(buffer) / 2);
         if (pos > buffer) {
-          // Write start/end address.
           if (is64bit) {
-            writer.PushUint64(pc - compilation_unit_low_pc);
-            offset_of_last_end_address = writer.size();
-            writer.PushUint64(method_info->high_pc_ - compilation_unit_low_pc);
+            debug_loc.PushUint64(variable_location.low_pc - compilation_unit_low_pc);
+            debug_loc.PushUint64(variable_location.high_pc - compilation_unit_low_pc);
           } else {
-            writer.PushUint32(pc - compilation_unit_low_pc);
-            offset_of_last_end_address = writer.size();
-            writer.PushUint32(method_info->high_pc_ - compilation_unit_low_pc);
+            debug_loc.PushUint32(variable_location.low_pc - compilation_unit_low_pc);
+            debug_loc.PushUint32(variable_location.high_pc - compilation_unit_low_pc);
           }
           // Write the expression.
-          writer.PushUint16(pos - buffer);
-          writer.PushData(buffer, pos - buffer);
+          debug_loc.PushUint16(pos - buffer);
+          debug_loc.PushData(buffer, pos - buffer);
         } else {
-          // Otherwise leave the address range undefined.
+          // Do not generate a .debug_loc entry if the location is not known.
         }
       }
       // Write end-of-list entry.
       if (is64bit) {
-        writer.PushUint64(0);
-        writer.PushUint64(0);
+        debug_loc.PushUint64(0);
+        debug_loc.PushUint64(0);
       } else {
-        writer.PushUint32(0);
-        writer.PushUint32(0);
+        debug_loc.PushUint32(0);
+        debug_loc.PushUint32(0);
+      }
+
+      // Write .debug_ranges entries.
+      // This includes ranges where the variable is in scope but the location is not known.
+      for (size_t i = 0; i < variable_locations.size(); i++) {
+        uint32_t low_pc = variable_locations[i].low_pc;
+        uint32_t high_pc = variable_locations[i].high_pc;
+        while (i + 1 < variable_locations.size() && variable_locations[i+1].low_pc == high_pc) {
+          // Merge address range with the next entry.
+          high_pc = variable_locations[++i].high_pc;
+        }
+        if (is64bit) {
+          debug_ranges.PushUint64(low_pc - compilation_unit_low_pc);
+          debug_ranges.PushUint64(high_pc - compilation_unit_low_pc);
+        } else {
+          debug_ranges.PushUint32(low_pc - compilation_unit_low_pc);
+          debug_ranges.PushUint32(high_pc - compilation_unit_low_pc);
+        }
+      }
+      // Write end-of-list entry.
+      if (is64bit) {
+        debug_ranges.PushUint64(0);
+        debug_ranges.PushUint64(0);
+      } else {
+        debug_ranges.PushUint32(0);
+        debug_ranges.PushUint32(0);
       }
     }
 
@@ -748,6 +879,7 @@
     builder_->WriteSection(".debug_abbrev", &debug_abbrev_.Data());
     builder_->WriteSection(".debug_str", &debug_str_.Data());
     builder_->WriteSection(".debug_loc", &debug_loc_);
+    builder_->WriteSection(".debug_ranges", &debug_ranges_);
   }
 
  private:
@@ -760,6 +892,7 @@
   DedupVector debug_abbrev_;
   DedupVector debug_str_;
   std::vector<uint8_t> debug_loc_;
+  std::vector<uint8_t> debug_ranges_;
 
   std::unordered_set<const char*> defined_dex_classes_;  // For CHECKs only.
 };
@@ -820,7 +953,7 @@
 
       struct DebugInfoCallbacks {
         static bool NewPosition(void* ctx, uint32_t address, uint32_t line) {
-          auto* context = reinterpret_cast<DebugInfoCallbacks*>(ctx);
+          auto* context = static_cast<DebugInfoCallbacks*>(ctx);
           context->dex2line_.push_back({address, static_cast<int32_t>(line)});
           return false;
         }
diff --git a/compiler/oat_writer.cc b/compiler/oat_writer.cc
index 0087a0d..e8e775f 100644
--- a/compiler/oat_writer.cc
+++ b/compiler/oat_writer.cc
@@ -641,8 +641,12 @@
     StackHandleScope<1> hs(soa.Self());
     Handle<mirror::DexCache> dex_cache(hs.NewHandle(linker->FindDexCache(
         Thread::Current(), *dex_file_)));
-    ArtMethod* method = linker->ResolveMethod(
-        *dex_file_, it.GetMemberIndex(), dex_cache, NullHandle<mirror::ClassLoader>(), nullptr,
+    ArtMethod* method = linker->ResolveMethod<ClassLinker::kNoICCECheckForCache>(
+        *dex_file_,
+        it.GetMemberIndex(),
+        dex_cache,
+        NullHandle<mirror::ClassLoader>(),
+        nullptr,
         invoke_type);
     if (method == nullptr) {
       LOG(INTERNAL_FATAL) << "Unexpected failure to resolve a method: "
diff --git a/compiler/optimizing/bounds_check_elimination.cc b/compiler/optimizing/bounds_check_elimination.cc
index 7dbfd7c..4c3f66a 100644
--- a/compiler/optimizing/bounds_check_elimination.cc
+++ b/compiler/optimizing/bounds_check_elimination.cc
@@ -1138,8 +1138,8 @@
   void VisitArrayGet(HArrayGet* array_get) OVERRIDE {
     if (!has_deoptimization_on_constant_subscripts_ && array_get->IsInLoop()) {
       HLoopInformation* loop = array_get->GetBlock()->GetLoopInformation();
-      if (loop->IsLoopInvariant(array_get->InputAt(0), false) &&
-          loop->IsLoopInvariant(array_get->InputAt(1), false)) {
+      if (loop->IsDefinedOutOfTheLoop(array_get->InputAt(0)) &&
+          loop->IsDefinedOutOfTheLoop(array_get->InputAt(1))) {
         SideEffects loop_effects = side_effects_.GetLoopEffects(loop->GetHeader());
         if (!array_get->GetSideEffects().MayDependOn(loop_effects)) {
           HoistToPreheaderOrDeoptBlock(loop, array_get);
@@ -1349,7 +1349,7 @@
    * by handling the null check under the hood of the array length operation.
    */
   bool CanHandleLength(HLoopInformation* loop, HInstruction* length, bool needs_taken_test) {
-    if (loop->IsLoopInvariant(length, false)) {
+    if (loop->IsDefinedOutOfTheLoop(length)) {
       return true;
     } else if (length->IsArrayLength() && length->GetBlock()->GetLoopInformation() == loop) {
       if (CanHandleNullCheck(loop, length->InputAt(0), needs_taken_test)) {
@@ -1365,11 +1365,11 @@
    * by generating a deoptimization test.
    */
   bool CanHandleNullCheck(HLoopInformation* loop, HInstruction* check, bool needs_taken_test) {
-    if (loop->IsLoopInvariant(check, false)) {
+    if (loop->IsDefinedOutOfTheLoop(check)) {
       return true;
     } else if (check->IsNullCheck() && check->GetBlock()->GetLoopInformation() == loop) {
       HInstruction* array = check->InputAt(0);
-      if (loop->IsLoopInvariant(array, false)) {
+      if (loop->IsDefinedOutOfTheLoop(array)) {
         // Generate: if (array == null) deoptimize;
         HBasicBlock* block = TransformLoopForDeoptimizationIfNeeded(loop, needs_taken_test);
         HInstruction* cond =
diff --git a/compiler/optimizing/builder.cc b/compiler/optimizing/builder.cc
index 8e75bdc..2bbf500 100644
--- a/compiler/optimizing/builder.cc
+++ b/compiler/optimizing/builder.cc
@@ -744,7 +744,7 @@
       soa.Decode<mirror::ClassLoader*>(dex_compilation_unit_->GetClassLoader())));
   Handle<mirror::Class> compiling_class(hs.NewHandle(GetCompilingClass()));
 
-  ArtMethod* resolved_method = class_linker->ResolveMethod(
+  ArtMethod* resolved_method = class_linker->ResolveMethod<ClassLinker::kForceICCECheck>(
       *dex_compilation_unit_->GetDexFile(),
       method_idx,
       dex_compilation_unit_->GetDexCache(),
diff --git a/compiler/optimizing/builder.h b/compiler/optimizing/builder.h
index c3979f3..ca71c32 100644
--- a/compiler/optimizing/builder.h
+++ b/compiler/optimizing/builder.h
@@ -90,8 +90,9 @@
 
   static constexpr const char* kBuilderPassName = "builder";
 
-  // The number of entries in a packed switch before we use a jump table.
-  static constexpr uint16_t kSmallSwitchThreshold = 5;
+  // The number of entries in a packed switch before we use a jump table or a specialized
+  // compare/jump series.
+  static constexpr uint16_t kSmallSwitchThreshold = 3;
 
  private:
   // Analyzes the dex instruction and adds HInstruction to the graph
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index ac6b5e8..0a26786 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -59,7 +59,7 @@
 // S registers. Therefore there is no need to block it.
 static constexpr DRegister DTMP = D31;
 
-static constexpr uint32_t kPackedSwitchJumpTableThreshold = 6;
+static constexpr uint32_t kPackedSwitchCompareJumpThreshold = 7;
 
 #define __ down_cast<ArmAssembler*>(codegen->GetAssembler())->
 #define QUICK_ENTRY_POINT(x) QUICK_ENTRYPOINT_OFFSET(kArmWordSize, x).Int32Value()
@@ -6106,7 +6106,7 @@
   LocationSummary* locations =
       new (GetGraph()->GetArena()) LocationSummary(switch_instr, LocationSummary::kNoCall);
   locations->SetInAt(0, Location::RequiresRegister());
-  if (switch_instr->GetNumEntries() >= kPackedSwitchJumpTableThreshold &&
+  if (switch_instr->GetNumEntries() > kPackedSwitchCompareJumpThreshold &&
       codegen_->GetAssembler()->IsThumb()) {
     locations->AddTemp(Location::RequiresRegister());  // We need a temp for the table base.
     if (switch_instr->GetStartValue() != 0) {
@@ -6122,12 +6122,30 @@
   Register value_reg = locations->InAt(0).AsRegister<Register>();
   HBasicBlock* default_block = switch_instr->GetDefaultBlock();
 
-  if (num_entries < kPackedSwitchJumpTableThreshold || !codegen_->GetAssembler()->IsThumb()) {
+  if (num_entries <= kPackedSwitchCompareJumpThreshold || !codegen_->GetAssembler()->IsThumb()) {
     // Create a series of compare/jumps.
+    Register temp_reg = IP;
+    // Note: It is fine for the AddConstantSetFlags() below to use the IP register to temporarily
+    // store the immediate, because IP is used as the destination register. For the other
+    // AddConstantSetFlags() and GenerateCompareWithImmediate() calls, the immediate values are
+    // constants and can be encoded in the instructions without using the IP register.
+    __ AddConstantSetFlags(temp_reg, value_reg, -lower_bound);
+
     const ArenaVector<HBasicBlock*>& successors = switch_instr->GetBlock()->GetSuccessors();
-    for (uint32_t i = 0; i < num_entries; i++) {
-      GenerateCompareWithImmediate(value_reg, lower_bound + i);
-      __ b(codegen_->GetLabelOf(successors[i]), EQ);
+    // Jump to successors[0] if value == lower_bound.
+    __ b(codegen_->GetLabelOf(successors[0]), EQ);
+    int32_t last_index = 0;
+    for (; num_entries - last_index > 2; last_index += 2) {
+      __ AddConstantSetFlags(temp_reg, temp_reg, -2);
+      // Jump to successors[last_index + 1] if value < case_value[last_index + 2].
+      __ b(codegen_->GetLabelOf(successors[last_index + 1]), LO);
+      // Jump to successors[last_index + 2] if value == case_value[last_index + 2].
+      __ b(codegen_->GetLabelOf(successors[last_index + 2]), EQ);
+    }
+    if (num_entries - last_index == 2) {
+      // Handle the last case_value.
+      GenerateCompareWithImmediate(temp_reg, 1);
+      __ b(codegen_->GetLabelOf(successors[last_index + 1]), EQ);
     }
 
     // And the default for any other value.
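
The new ARM lowering subtracts `lower_bound` once and then walks the cases two at a time, re-using the flags of each `SUBS` for both a `LO` and an `EQ` branch; this is where the roughly 1.5-instructions-per-entry figure in the related comments comes from. A plain-C++ model of the emitted control flow (unsigned semantics mirror the `LO` condition; this is a sketch for illustration, not ART code):

```cpp
#include <cassert>
#include <cstdint>

// Model of the compare/jump series emitted above. After temp = value - lower_bound,
// each iteration subtracts 2 from temp; the LO branch corresponds to an unsigned
// borrow (temp was 1) and the EQ branch to a zero result (temp was 2).
// Returns the matched successor index, or -1 for the default block.
int PackedSwitchModel(int32_t value, int32_t lower_bound, uint32_t num_entries) {
  uint32_t temp = static_cast<uint32_t>(value) - static_cast<uint32_t>(lower_bound);
  if (temp == 0) return 0;                 // b EQ -> successors[0]
  uint32_t last_index = 0;
  for (; num_entries - last_index > 2; last_index += 2) {
    bool borrow = temp < 2;                // LO condition of SUBS temp, temp, #2.
    temp -= 2;
    if (borrow) return last_index + 1;     // value == case_value[last_index + 1]
    if (temp == 0) return last_index + 2;  // value == case_value[last_index + 2]
  }
  if (num_entries - last_index == 2) {
    if (temp == 1) return last_index + 1;  // Handle the last case_value.
  }
  return -1;                               // Fall through to the default block.
}

int main() {
  // A 5-entry packed switch starting at 10: values 10..14 map to indices 0..4.
  for (int32_t v = 5; v <= 20; ++v) {
    int idx = PackedSwitchModel(v, 10, 5);
    assert((10 <= v && v <= 14) ? idx == v - 10 : idx == -1);
  }
  return 0;
}
```

The unsigned wraparound is what makes the single `Bltz`/`LO` lower-bound check in the MIPS versions sufficient: a value below `lower_bound` becomes a huge unsigned offset and falls through every case to the default block.
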
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index 04acd9d..227f4be 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -71,10 +71,10 @@
 using helpers::ArtVixlRegCodeCoherentForRegSet;
 
 static constexpr int kCurrentMethodStackOffset = 0;
-// The compare/jump sequence will generate about (2 * num_entries + 1) instructions, while the jump
+// The compare/jump sequence will generate about (1.5 * num_entries + 3) instructions, while the jump
 // table version generates 7 instructions and num_entries literals. The compare/jump sequence
 // generates less code/data with a small num_entries.
-static constexpr uint32_t kPackedSwitchJumpTableThreshold = 6;
+static constexpr uint32_t kPackedSwitchCompareJumpThreshold = 7;
 
 inline Condition ARM64Condition(IfCondition cond) {
   switch (cond) {
@@ -546,7 +546,7 @@
 
 void JumpTableARM64::EmitTable(CodeGeneratorARM64* codegen) {
   uint32_t num_entries = switch_instr_->GetNumEntries();
-  DCHECK_GE(num_entries, kPackedSwitchJumpTableThreshold);
+  DCHECK_GE(num_entries, kPackedSwitchCompareJumpThreshold);
 
   // We are about to use the assembler to place literals directly. Make sure we have enough
   // underlying code buffer and we have generated the jump table with right size.
@@ -4558,20 +4558,29 @@
   // ranges and emit the tables only as required.
   static constexpr int32_t kJumpTableInstructionThreshold = 1* MB / kMaxExpectedSizePerHInstruction;
 
-  if (num_entries < kPackedSwitchJumpTableThreshold ||
+  if (num_entries <= kPackedSwitchCompareJumpThreshold ||
       // Current instruction id is an upper bound of the number of HIRs in the graph.
       GetGraph()->GetCurrentInstructionId() > kJumpTableInstructionThreshold) {
     // Create a series of compare/jumps.
+    UseScratchRegisterScope temps(codegen_->GetVIXLAssembler());
+    Register temp = temps.AcquireW();
+    __ Subs(temp, value_reg, Operand(lower_bound));
+
     const ArenaVector<HBasicBlock*>& successors = switch_instr->GetBlock()->GetSuccessors();
-    for (uint32_t i = 0; i < num_entries; i++) {
-      int32_t case_value = lower_bound + i;
-      vixl::Label* succ = codegen_->GetLabelOf(successors[i]);
-      if (case_value == 0) {
-        __ Cbz(value_reg, succ);
-      } else {
-        __ Cmp(value_reg, Operand(case_value));
-        __ B(eq, succ);
-      }
+    // Jump to successors[0] if value == lower_bound.
+    __ B(eq, codegen_->GetLabelOf(successors[0]));
+    int32_t last_index = 0;
+    for (; num_entries - last_index > 2; last_index += 2) {
+      __ Subs(temp, temp, Operand(2));
+      // Jump to successors[last_index + 1] if value < case_value[last_index + 2].
+      __ B(lo, codegen_->GetLabelOf(successors[last_index + 1]));
+      // Jump to successors[last_index + 2] if value == case_value[last_index + 2].
+      __ B(eq, codegen_->GetLabelOf(successors[last_index + 2]));
+    }
+    if (num_entries - last_index == 2) {
+      // Handle the last case_value.
+      __ Cmp(temp, Operand(1));
+      __ B(eq, codegen_->GetLabelOf(successors[last_index + 1]));
     }
 
     // And the default for any other value.
diff --git a/compiler/optimizing/code_generator_mips.cc b/compiler/optimizing/code_generator_mips.cc
index 9dc9167..f872bfe 100644
--- a/compiler/optimizing/code_generator_mips.cc
+++ b/compiler/optimizing/code_generator_mips.cc
@@ -4248,19 +4248,31 @@
   HBasicBlock* default_block = switch_instr->GetDefaultBlock();
 
   // Create a set of compare/jumps.
+  Register temp_reg = TMP;
+  __ Addiu32(temp_reg, value_reg, -lower_bound);
+  // Jump to default if the index is negative.
+  // Note: We don't check the case where the index is positive while value < lower_bound, because
+  // then index >= num_entries must be true and the default case is reached anyway, saving a branch.
+  __ Bltz(temp_reg, codegen_->GetLabelOf(default_block));
+
   const ArenaVector<HBasicBlock*>& successors = switch_instr->GetBlock()->GetSuccessors();
-  for (int32_t i = 0; i < num_entries; ++i) {
-    int32_t case_value = lower_bound + i;
-    MipsLabel* successor_label = codegen_->GetLabelOf(successors[i]);
-    if (case_value == 0) {
-      __ Beqz(value_reg, successor_label);
-    } else {
-      __ LoadConst32(TMP, case_value);
-      __ Beq(value_reg, TMP, successor_label);
-    }
+  // Jump to successors[0] if value == lower_bound.
+  __ Beqz(temp_reg, codegen_->GetLabelOf(successors[0]));
+  int32_t last_index = 0;
+  for (; num_entries - last_index > 2; last_index += 2) {
+    __ Addiu(temp_reg, temp_reg, -2);
+    // Jump to successors[last_index + 1] if value < case_value[last_index + 2].
+    __ Bltz(temp_reg, codegen_->GetLabelOf(successors[last_index + 1]));
+    // Jump to successors[last_index + 2] if value == case_value[last_index + 2].
+    __ Beqz(temp_reg, codegen_->GetLabelOf(successors[last_index + 2]));
+  }
+  if (num_entries - last_index == 2) {
+    // Handle the last case_value.
+    __ Addiu(temp_reg, temp_reg, -1);
+    __ Beqz(temp_reg, codegen_->GetLabelOf(successors[last_index + 1]));
   }
 
-  // Insert the default branch for every other value.
+  // And the default for any other value.
   if (!codegen_->GoesToNextBlock(switch_instr->GetBlock(), default_block)) {
     __ B(codegen_->GetLabelOf(default_block));
   }
diff --git a/compiler/optimizing/code_generator_mips64.cc b/compiler/optimizing/code_generator_mips64.cc
index bc5eb31..78f5644 100644
--- a/compiler/optimizing/code_generator_mips64.cc
+++ b/compiler/optimizing/code_generator_mips64.cc
@@ -3975,17 +3975,34 @@
   GpuRegister value_reg = locations->InAt(0).AsRegister<GpuRegister>();
   HBasicBlock* default_block = switch_instr->GetDefaultBlock();
 
-  // Create a series of compare/jumps.
+  // Create a set of compare/jumps.
+  GpuRegister temp_reg = TMP;
+  if (IsInt<16>(-lower_bound)) {
+    __ Addiu(temp_reg, value_reg, -lower_bound);
+  } else {
+    __ LoadConst32(AT, -lower_bound);
+    __ Addu(temp_reg, value_reg, AT);
+  }
+  // Jump to default if the index is negative.
+  // Note: We don't check the case where the index is positive while value < lower_bound, because
+  // then index >= num_entries must be true and the default case is reached anyway, saving a branch.
+  __ Bltzc(temp_reg, codegen_->GetLabelOf(default_block));
+
   const ArenaVector<HBasicBlock*>& successors = switch_instr->GetBlock()->GetSuccessors();
-  for (int32_t i = 0; i < num_entries; i++) {
-    int32_t case_value = lower_bound + i;
-    Mips64Label* succ = codegen_->GetLabelOf(successors[i]);
-    if (case_value == 0) {
-      __ Beqzc(value_reg, succ);
-    } else {
-      __ LoadConst32(TMP, case_value);
-      __ Beqc(value_reg, TMP, succ);
-    }
+  // Jump to successors[0] if value == lower_bound.
+  __ Beqzc(temp_reg, codegen_->GetLabelOf(successors[0]));
+  int32_t last_index = 0;
+  for (; num_entries - last_index > 2; last_index += 2) {
+    __ Addiu(temp_reg, temp_reg, -2);
+    // Jump to successors[last_index + 1] if value < case_value[last_index + 2].
+    __ Bltzc(temp_reg, codegen_->GetLabelOf(successors[last_index + 1]));
+    // Jump to successors[last_index + 2] if value == case_value[last_index + 2].
+    __ Beqzc(temp_reg, codegen_->GetLabelOf(successors[last_index + 2]));
+  }
+  if (num_entries - last_index == 2) {
+    // Handle the last case_value.
+    __ Addiu(temp_reg, temp_reg, -1);
+    __ Beqzc(temp_reg, codegen_->GetLabelOf(successors[last_index + 1]));
   }
 
   // And the default for any other value.
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 2fb87d3..19f03df 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -42,7 +42,6 @@
 
 static constexpr int kCurrentMethodStackOffset = 0;
 static constexpr Register kMethodRegisterArgument = EAX;
-
 static constexpr Register kCoreCalleeSaves[] = { EBP, ESI, EDI };
 
 static constexpr int kC2ConditionMask = 0x400;
@@ -6426,31 +6425,67 @@
   locations->SetInAt(0, Location::RequiresRegister());
 }
 
-void InstructionCodeGeneratorX86::VisitPackedSwitch(HPackedSwitch* switch_instr) {
-  int32_t lower_bound = switch_instr->GetStartValue();
-  int32_t num_entries = switch_instr->GetNumEntries();
-  LocationSummary* locations = switch_instr->GetLocations();
-  Register value_reg = locations->InAt(0).AsRegister<Register>();
-  HBasicBlock* default_block = switch_instr->GetDefaultBlock();
+void InstructionCodeGeneratorX86::GenPackedSwitchWithCompares(Register value_reg,
+                                                              int32_t lower_bound,
+                                                              uint32_t num_entries,
+                                                              HBasicBlock* switch_block,
+                                                              HBasicBlock* default_block) {
+  // Figure out the correct compare values and jump conditions.
+  // Handle the first compare/branch as a special case because it might
+  // jump to the default case.
+  DCHECK_GT(num_entries, 2u);
+  Condition first_condition;
+  uint32_t index;
+  const ArenaVector<HBasicBlock*>& successors = switch_block->GetSuccessors();
+  if (lower_bound != 0) {
+    first_condition = kLess;
+    __ cmpl(value_reg, Immediate(lower_bound));
+    __ j(first_condition, codegen_->GetLabelOf(default_block));
+    __ j(kEqual, codegen_->GetLabelOf(successors[0]));
 
-  // Create a series of compare/jumps.
-  const ArenaVector<HBasicBlock*>& successors = switch_instr->GetBlock()->GetSuccessors();
-  for (int i = 0; i < num_entries; i++) {
-    int32_t case_value = lower_bound + i;
-    if (case_value == 0) {
-      __ testl(value_reg, value_reg);
-    } else {
-      __ cmpl(value_reg, Immediate(case_value));
-    }
-    __ j(kEqual, codegen_->GetLabelOf(successors[i]));
+    index = 1;
+  } else {
+    // Handle all the compare/jumps below.
+    first_condition = kBelow;
+    index = 0;
+  }
+
+  // Handle the rest of the compare/jumps.
+  for (; index + 1 < num_entries; index += 2) {
+    int32_t compare_to_value = lower_bound + index + 1;
+    __ cmpl(value_reg, Immediate(compare_to_value));
+    // Jump to successors[index] if value < case_value[index + 1].
+    __ j(first_condition, codegen_->GetLabelOf(successors[index]));
+    // Jump to successors[index + 1] if value == case_value[index + 1].
+    __ j(kEqual, codegen_->GetLabelOf(successors[index + 1]));
+  }
+
+  if (index != num_entries) {
+    // There are an odd number of entries. Handle the last one.
+    DCHECK_EQ(index + 1, num_entries);
+    __ cmpl(value_reg, Immediate(lower_bound + index));
+    __ j(kEqual, codegen_->GetLabelOf(successors[index]));
   }
 
   // And the default for any other value.
-  if (!codegen_->GoesToNextBlock(switch_instr->GetBlock(), default_block)) {
-      __ jmp(codegen_->GetLabelOf(default_block));
+  if (!codegen_->GoesToNextBlock(switch_block, default_block)) {
+    __ jmp(codegen_->GetLabelOf(default_block));
   }
 }
 
+void InstructionCodeGeneratorX86::VisitPackedSwitch(HPackedSwitch* switch_instr) {
+  int32_t lower_bound = switch_instr->GetStartValue();
+  uint32_t num_entries = switch_instr->GetNumEntries();
+  LocationSummary* locations = switch_instr->GetLocations();
+  Register value_reg = locations->InAt(0).AsRegister<Register>();
+
+  GenPackedSwitchWithCompares(value_reg,
+                              lower_bound,
+                              num_entries,
+                              switch_instr->GetBlock(),
+                              switch_instr->GetDefaultBlock());
+}
+
 void LocationsBuilderX86::VisitX86PackedSwitch(HX86PackedSwitch* switch_instr) {
   LocationSummary* locations =
       new (GetGraph()->GetArena()) LocationSummary(switch_instr, LocationSummary::kNoCall);
@@ -6465,11 +6500,20 @@
 
 void InstructionCodeGeneratorX86::VisitX86PackedSwitch(HX86PackedSwitch* switch_instr) {
   int32_t lower_bound = switch_instr->GetStartValue();
-  int32_t num_entries = switch_instr->GetNumEntries();
+  uint32_t num_entries = switch_instr->GetNumEntries();
   LocationSummary* locations = switch_instr->GetLocations();
   Register value_reg = locations->InAt(0).AsRegister<Register>();
   HBasicBlock* default_block = switch_instr->GetDefaultBlock();
 
+  if (num_entries <= kPackedSwitchJumpTableThreshold) {
+    GenPackedSwitchWithCompares(value_reg,
+                                lower_bound,
+                                num_entries,
+                                switch_instr->GetBlock(),
+                                default_block);
+    return;
+  }
+
   // Optimizing has a jump area.
   Register temp_reg = locations->GetTemp(0).AsRegister<Register>();
   Register constant_area = locations->InAt(1).AsRegister<Register>();
@@ -6481,7 +6525,7 @@
   }
 
   // Is the value in range?
-  DCHECK_GE(num_entries, 1);
+  DCHECK_GE(num_entries, 1u);
   __ cmpl(value_reg, Immediate(num_entries - 1));
   __ j(kAbove, codegen_->GetLabelOf(default_block));
 
diff --git a/compiler/optimizing/code_generator_x86.h b/compiler/optimizing/code_generator_x86.h
index 064051c..f9403a6 100644
--- a/compiler/optimizing/code_generator_x86.h
+++ b/compiler/optimizing/code_generator_x86.h
@@ -195,6 +195,11 @@
 
   X86Assembler* GetAssembler() const { return assembler_; }
 
+  // The compare/jump sequence will generate about (1.5 * num_entries) instructions. A jump
+  // table version generates 7 instructions and num_entries literals. The compare/jump sequence
+  // generates less code/data with a small num_entries.
+  static constexpr uint32_t kPackedSwitchJumpTableThreshold = 5;
+
  private:
   // Generate code for the given suspend check. If not null, `successor`
   // is the block to branch to if the suspend check is not needed, and after
@@ -236,6 +241,11 @@
   void GenerateFPJumps(HCondition* cond, Label* true_label, Label* false_label);
   void GenerateLongComparesAndJumps(HCondition* cond, Label* true_label, Label* false_label);
   void HandleGoto(HInstruction* got, HBasicBlock* successor);
+  void GenPackedSwitchWithCompares(Register value_reg,
+                                   int32_t lower_bound,
+                                   uint32_t num_entries,
+                                   HBasicBlock* switch_block,
+                                   HBasicBlock* default_block);
 
   X86Assembler* const assembler_;
   CodeGeneratorX86* const codegen_;
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index 4618be9..44a51ea 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -41,6 +41,10 @@
 
 static constexpr int kCurrentMethodStackOffset = 0;
 static constexpr Register kMethodRegisterArgument = RDI;
+// The compare/jump sequence will generate about (1.5 * num_entries) instructions. A jump
+// table version generates 7 instructions and num_entries literals. The compare/jump sequence
+// generates less code/data with a small num_entries.
+static constexpr uint32_t kPackedSwitchJumpTableThreshold = 5;
 
 static constexpr Register kCoreCalleeSaves[] = { RBX, RBP, R12, R13, R14, R15 };
 static constexpr FloatRegister kFpuCalleeSaves[] = { XMM12, XMM13, XMM14, XMM15 };
@@ -6021,11 +6025,58 @@
 
 void InstructionCodeGeneratorX86_64::VisitPackedSwitch(HPackedSwitch* switch_instr) {
   int32_t lower_bound = switch_instr->GetStartValue();
-  int32_t num_entries = switch_instr->GetNumEntries();
+  uint32_t num_entries = switch_instr->GetNumEntries();
   LocationSummary* locations = switch_instr->GetLocations();
   CpuRegister value_reg_in = locations->InAt(0).AsRegister<CpuRegister>();
   CpuRegister temp_reg = locations->GetTemp(0).AsRegister<CpuRegister>();
   CpuRegister base_reg = locations->GetTemp(1).AsRegister<CpuRegister>();
+  HBasicBlock* default_block = switch_instr->GetDefaultBlock();
+
+  // Should we generate smaller inline compare/jumps?
+  if (num_entries <= kPackedSwitchJumpTableThreshold) {
+    // Figure out the correct compare values and jump conditions.
+    // Handle the first compare/branch as a special case because it might
+    // jump to the default case.
+    DCHECK_GT(num_entries, 2u);
+    Condition first_condition;
+    uint32_t index;
+    const ArenaVector<HBasicBlock*>& successors = switch_instr->GetBlock()->GetSuccessors();
+    if (lower_bound != 0) {
+      first_condition = kLess;
+      __ cmpl(value_reg_in, Immediate(lower_bound));
+      __ j(first_condition, codegen_->GetLabelOf(default_block));
+      __ j(kEqual, codegen_->GetLabelOf(successors[0]));
+
+      index = 1;
+    } else {
+      // Handle all the compare/jumps below.
+      first_condition = kBelow;
+      index = 0;
+    }
+
+    // Handle the rest of the compare/jumps.
+    for (; index + 1 < num_entries; index += 2) {
+      int32_t compare_to_value = lower_bound + index + 1;
+      __ cmpl(value_reg_in, Immediate(compare_to_value));
+      // Jump to successors[index] if value < case_value[index + 1].
+      __ j(first_condition, codegen_->GetLabelOf(successors[index]));
+      // Jump to successors[index + 1] if value == case_value[index + 1].
+      __ j(kEqual, codegen_->GetLabelOf(successors[index + 1]));
+    }
+
+    if (index != num_entries) {
+      // There are an odd number of entries. Handle the last one.
+      DCHECK_EQ(index + 1, num_entries);
+      __ cmpl(value_reg_in, Immediate(lower_bound + index));
+      __ j(kEqual, codegen_->GetLabelOf(successors[index]));
+    }
+
+    // And the default for any other value.
+    if (!codegen_->GoesToNextBlock(switch_instr->GetBlock(), default_block)) {
+      __ jmp(codegen_->GetLabelOf(default_block));
+    }
+    return;
+  }
 
   // Remove the bias, if needed.
   Register value_reg_out = value_reg_in.AsRegister();
@@ -6036,7 +6087,6 @@
   CpuRegister value_reg(value_reg_out);
 
   // Is the value in range?
-  HBasicBlock* default_block = switch_instr->GetDefaultBlock();
   __ cmpl(value_reg, Immediate(num_entries - 1));
   __ j(kAbove, codegen_->GetLabelOf(default_block));
 
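
The x86/x86-64 variant reaches the same 1.5-compares-per-entry shape differently: it compares against absolute case values, and when `lower_bound != 0` the first signed compare doubles as the lower-bound check. A plain-C++ model of the branch logic above (a sketch for illustration, not ART code):

```cpp
#include <cassert>
#include <cstdint>

// Model of the inline compare/jump series in VisitPackedSwitch() above.
// Returns the matched successor index, or -1 for the default block.
int PackedSwitchComparesModel(int32_t value, int32_t lower_bound, uint32_t num_entries) {
  uint32_t index;
  bool signed_compare;  // kLess after a signed pre-check, kBelow (unsigned) otherwise.
  if (lower_bound != 0) {
    if (value < lower_bound) return -1;  // j kLess -> default
    if (value == lower_bound) return 0;  // j kEqual -> successors[0]
    index = 1;
    signed_compare = true;               // first_condition = kLess
  } else {
    index = 0;
    signed_compare = false;              // first_condition = kBelow; negative values wrap high.
  }
  for (; index + 1 < num_entries; index += 2) {
    int32_t compare_to_value = lower_bound + static_cast<int32_t>(index) + 1;
    bool below = signed_compare ? (value < compare_to_value)
                                : (static_cast<uint32_t>(value) <
                                   static_cast<uint32_t>(compare_to_value));
    if (below) return index;             // value == case_value[index] by elimination.
    if (value == compare_to_value) return index + 1;
  }
  if (index != num_entries) {            // Odd number of entries: handle the last one.
    if (value == lower_bound + static_cast<int32_t>(index)) return index;
  }
  return -1;                             // Fall through to the default block.
}

int main() {
  for (int32_t v = -2; v < 12; ++v) {
    int idx = PackedSwitchComparesModel(v, 3, 5);  // Cases 3..7 map to indices 0..4.
    assert((3 <= v && v <= 7) ? idx == v - 3 : idx == -1);
  }
  return 0;
}
```
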
diff --git a/compiler/optimizing/induction_var_analysis.cc b/compiler/optimizing/induction_var_analysis.cc
index fdf8cc9..0b7fdf8 100644
--- a/compiler/optimizing/induction_var_analysis.cc
+++ b/compiler/optimizing/induction_var_analysis.cc
@@ -705,7 +705,8 @@
       return loop_it->second;
     }
   }
-  if (loop->IsLoopInvariant(instruction, true)) {
+  if (loop->IsDefinedOutOfTheLoop(instruction)) {
+    DCHECK(instruction->GetBlock()->Dominates(loop->GetPreHeader()));
     InductionInfo* info = CreateInvariantFetch(instruction);
     AssignInfo(loop, instruction, info);
     return info;
diff --git a/compiler/optimizing/load_store_elimination.cc b/compiler/optimizing/load_store_elimination.cc
index 680f89f..389ada7 100644
--- a/compiler/optimizing/load_store_elimination.cc
+++ b/compiler/optimizing/load_store_elimination.cc
@@ -725,7 +725,7 @@
           // instruction is a store in the loop, so the loop must do writes.
           DCHECK(side_effects_.GetLoopEffects(loop_info->GetHeader()).DoesAnyWrite());
 
-          if (loop_info->IsLoopInvariant(original_ref, false)) {
+          if (loop_info->IsDefinedOutOfTheLoop(original_ref)) {
             DCHECK(original_ref->GetBlock()->Dominates(loop_info->GetPreHeader()));
             // Keep the store since its value may be needed at the loop header.
             possibly_redundant = false;
diff --git a/compiler/optimizing/nodes.cc b/compiler/optimizing/nodes.cc
index 9b26de4..461be25 100644
--- a/compiler/optimizing/nodes.cc
+++ b/compiler/optimizing/nodes.cc
@@ -587,15 +587,8 @@
   return other.blocks_.IsBitSet(header_->GetBlockId());
 }
 
-bool HLoopInformation::IsLoopInvariant(HInstruction* instruction, bool must_dominate) const {
-  HLoopInformation* other_loop = instruction->GetBlock()->GetLoopInformation();
-  if (other_loop != this && (other_loop == nullptr || !other_loop->IsIn(*this))) {
-    if (must_dominate) {
-      return instruction->GetBlock()->Dominates(GetHeader());
-    }
-    return true;
-  }
-  return false;
+bool HLoopInformation::IsDefinedOutOfTheLoop(HInstruction* instruction) const {
+  return !blocks_.IsBitSet(instruction->GetBlock()->GetBlockId());
 }
 
 size_t HLoopInformation::GetLifetimeEnd() const {
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 9d3c88c..3e38e9f 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -564,11 +564,8 @@
   // Note that `other` *must* be populated before entering this function.
   bool IsIn(const HLoopInformation& other) const;
 
-  // Returns true if instruction is not defined within this loop or any loop nested inside
-  // this loop. If must_dominate is set, only definitions that actually dominate the loop
-  // header can be invariant. Otherwise, any definition outside the loop, including
-  // definitions that appear after the loop, is invariant.
-  bool IsLoopInvariant(HInstruction* instruction, bool must_dominate) const;
+  // Returns true if instruction is not defined within this loop.
+  bool IsDefinedOutOfTheLoop(HInstruction* instruction) const;
 
   const ArenaBitVector& GetBlocks() const { return blocks_; }
 
diff --git a/compiler/optimizing/pc_relative_fixups_x86.cc b/compiler/optimizing/pc_relative_fixups_x86.cc
index b383f1e..a385448 100644
--- a/compiler/optimizing/pc_relative_fixups_x86.cc
+++ b/compiler/optimizing/pc_relative_fixups_x86.cc
@@ -15,6 +15,7 @@
  */
 
 #include "pc_relative_fixups_x86.h"
+#include "code_generator_x86.h"
 
 namespace art {
 namespace x86 {
@@ -79,6 +80,10 @@
   }
 
   void VisitPackedSwitch(HPackedSwitch* switch_insn) OVERRIDE {
+    if (switch_insn->GetNumEntries() <=
+        InstructionCodeGeneratorX86::kPackedSwitchJumpTableThreshold) {
+      return;
+    }
     // We need to replace the HPackedSwitch with a HX86PackedSwitch in order to
     // address the constant area.
     InitializePCRelativeBasePointer();
diff --git a/compiler/optimizing/reference_type_propagation.cc b/compiler/optimizing/reference_type_propagation.cc
index dd34924..fea903d 100644
--- a/compiler/optimizing/reference_type_propagation.cc
+++ b/compiler/optimizing/reference_type_propagation.cc
@@ -469,7 +469,7 @@
       // but then we would need to pass it to RTPVisitor just for this debug check. Since
       // the method is from the String class, the null loader is good enough.
       Handle<mirror::ClassLoader> loader;
-      ArtMethod* method = cl->ResolveMethod(
+      ArtMethod* method = cl->ResolveMethod<ClassLinker::kNoICCECheckForCache>(
           invoke->GetDexFile(), invoke->GetDexMethodIndex(), dex_cache, loader, nullptr, kDirect);
       DCHECK(method != nullptr);
       mirror::Class* declaring_class = method->GetDeclaringClass();
diff --git a/compiler/trampolines/trampoline_compiler.cc b/compiler/trampolines/trampoline_compiler.cc
index 39e5259..48465e6 100644
--- a/compiler/trampolines/trampoline_compiler.cc
+++ b/compiler/trampolines/trampoline_compiler.cc
@@ -57,7 +57,7 @@
       __ LoadFromOffset(kLoadWord, PC, R0, offset.Int32Value());
       break;
     case kJniAbi:  // Load via Thread* held in JNIEnv* in first argument (R0).
-      __ LoadFromOffset(kLoadWord, IP, R0, JNIEnvExt::SelfOffset().Int32Value());
+      __ LoadFromOffset(kLoadWord, IP, R0, JNIEnvExt::SelfOffset(4).Int32Value());
       __ LoadFromOffset(kLoadWord, PC, IP, offset.Int32Value());
       break;
     case kQuickAbi:  // R9 holds Thread*.
@@ -91,7 +91,7 @@
     case kJniAbi:  // Load via Thread* held in JNIEnv* in first argument (X0).
       __ LoadRawPtr(Arm64ManagedRegister::FromXRegister(IP1),
                       Arm64ManagedRegister::FromXRegister(X0),
-                      Offset(JNIEnvExt::SelfOffset().Int32Value()));
+                      Offset(JNIEnvExt::SelfOffset(8).Int32Value()));
 
       __ JumpTo(Arm64ManagedRegister::FromXRegister(IP1), Offset(offset.Int32Value()),
                 Arm64ManagedRegister::FromXRegister(IP0));
@@ -126,7 +126,7 @@
       __ LoadFromOffset(kLoadWord, T9, A0, offset.Int32Value());
       break;
     case kJniAbi:  // Load via Thread* held in JNIEnv* in first argument (A0).
-      __ LoadFromOffset(kLoadWord, T9, A0, JNIEnvExt::SelfOffset().Int32Value());
+      __ LoadFromOffset(kLoadWord, T9, A0, JNIEnvExt::SelfOffset(4).Int32Value());
       __ LoadFromOffset(kLoadWord, T9, T9, offset.Int32Value());
       break;
     case kQuickAbi:  // S1 holds Thread*.
@@ -158,7 +158,7 @@
       __ LoadFromOffset(kLoadDoubleword, T9, A0, offset.Int32Value());
       break;
     case kJniAbi:  // Load via Thread* held in JNIEnv* in first argument (A0).
-      __ LoadFromOffset(kLoadDoubleword, T9, A0, JNIEnvExt::SelfOffset().Int32Value());
+      __ LoadFromOffset(kLoadDoubleword, T9, A0, JNIEnvExt::SelfOffset(8).Int32Value());
       __ LoadFromOffset(kLoadDoubleword, T9, T9, offset.Int32Value());
       break;
     case kQuickAbi:  // Fall-through.
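
The trampoline hunks above now pass the target's pointer size (4 or 8) to `JNIEnvExt::SelfOffset()`, since the offset of the `Thread*` inside `JNIEnvExt` depends on the target ABI rather than on the build host. A heavily hedged sketch of why: assuming `JNIEnvExt` begins with the JNI function-table pointer (the JNI spec requires `JNIEnv` to start with it) and the `Thread* self` field follows, the offset is one target pointer. That layout is an assumption for illustration only; the real computation lives in ART's jni_env_ext code:

```cpp
#include <cassert>
#include <cstdint>

// Hypothetical sketch: when cross-compiling (e.g., a 64-bit host targeting
// 32-bit ARM), offsetof on the host's own struct layout would be wrong, so the
// offset must be computed from the *target* pointer size. Assumes the layout
// described in the lead-in; not ART's actual implementation.
constexpr uint32_t SelfOffsetSketch(uint32_t target_pointer_size) {
  return target_pointer_size;  // Skip the leading JNI `functions` pointer.
}

int main() {
  assert(SelfOffsetSketch(4) == 4);  // 32-bit targets (arm, mips, x86).
  assert(SelfOffsetSketch(8) == 8);  // 64-bit targets (arm64, mips64, x86-64).
  return 0;
}
```
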
diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc
index a1485e4..2aa4085 100644
--- a/dex2oat/dex2oat.cc
+++ b/dex2oat/dex2oat.cc
@@ -215,7 +215,9 @@
   UsageError("      Example: --base=0x50000000");
   UsageError("");
   UsageError("  --boot-image=<file.art>: provide the image file for the boot class path.");
+  UsageError("      Do not include the arch as part of the name, it is added automatically.");
   UsageError("      Example: --boot-image=/system/framework/boot.art");
+  UsageError("               (specifies /system/framework/<arch>/boot.art as the image file)");
   UsageError("      Default: $ANDROID_ROOT/system/framework/boot.art");
   UsageError("");
   UsageError("  --android-root=<path>: used to locate libraries for portable linking.");
diff --git a/runtime/Android.mk b/runtime/Android.mk
index 571a2f5..74cc899 100644
--- a/runtime/Android.mk
+++ b/runtime/Android.mk
@@ -27,6 +27,7 @@
   base/arena_allocator.cc \
   base/arena_bit_vector.cc \
   base/bit_vector.cc \
+  base/file_magic.cc \
   base/hex_dump.cc \
   base/logging.cc \
   base/mutex.cc \
diff --git a/runtime/arch/stub_test.cc b/runtime/arch/stub_test.cc
index 8746bad..2cb2212 100644
--- a/runtime/arch/stub_test.cc
+++ b/runtime/arch/stub_test.cc
@@ -1193,7 +1193,8 @@
 
 
 TEST_F(StubTest, StringCompareTo) {
-#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || (defined(__x86_64__) && !defined(__APPLE__))
+#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || \
+    (defined(__mips__) && defined(__LP64__)) || (defined(__x86_64__) && !defined(__APPLE__))
   // TODO: Check the "Unresolved" allocation stubs
 
   Thread* self = Thread::Current();
@@ -2042,7 +2043,7 @@
 }
 
 TEST_F(StubTest, StringIndexOf) {
-#if defined(__arm__) || defined(__aarch64__)
+#if defined(__arm__) || defined(__aarch64__) || (defined(__mips__) && defined(__LP64__))
   Thread* self = Thread::Current();
   ScopedObjectAccess soa(self);
   // garbage is created during ClassLinker::Init
diff --git a/runtime/base/file_magic.cc b/runtime/base/file_magic.cc
new file mode 100644
index 0000000..9756338
--- /dev/null
+++ b/runtime/base/file_magic.cc
@@ -0,0 +1,58 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "file_magic.h"
+
+#include <errno.h>
+#include <fcntl.h>
+#include <string.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+#include "base/logging.h"
+#include "dex_file.h"
+#include "stringprintf.h"
+
+namespace art {
+
+ScopedFd OpenAndReadMagic(const char* filename, uint32_t* magic, std::string* error_msg) {
+  CHECK(magic != nullptr);
+  ScopedFd fd(open(filename, O_RDONLY, 0));
+  if (fd.get() == -1) {
+    *error_msg = StringPrintf("Unable to open '%s' : %s", filename, strerror(errno));
+    return ScopedFd();
+  }
+  int n = TEMP_FAILURE_RETRY(read(fd.get(), magic, sizeof(*magic)));
+  if (n != sizeof(*magic)) {
+    *error_msg = StringPrintf("Failed to find magic in '%s'", filename);
+    return ScopedFd();
+  }
+  if (lseek(fd.get(), 0, SEEK_SET) != 0) {
+    *error_msg = StringPrintf("Failed to seek to beginning of file '%s' : %s", filename,
+                              strerror(errno));
+    return ScopedFd();
+  }
+  return fd;
+}
+
+bool IsZipMagic(uint32_t magic) {
+  return (('P' == ((magic >> 0) & 0xff)) &&
+          ('K' == ((magic >> 8) & 0xff)));
+}
+
+bool IsDexMagic(uint32_t magic) {
+  return DexFile::IsMagicValid(reinterpret_cast<const uint8_t*>(&magic));
+}
+
+}  // namespace art
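For orientation, a minimal caller of the new helpers might look as follows (a hypothetical sketch, not part of the patch; it mirrors the dex_file.cc call site further down):

#include <string>

#include "base/file_magic.h"

// Classify a file by its leading magic bytes; returns nullptr on I/O failure.
static const char* ClassifyFile(const char* filename, std::string* error_msg) {
  uint32_t magic = 0;
  art::ScopedFd fd(art::OpenAndReadMagic(filename, &magic, error_msg));
  if (fd.get() == -1) {
    return nullptr;  // *error_msg already describes the failure.
  }
  // OpenAndReadMagic() rewinds the fd, so a real caller could keep reading from it.
  if (art::IsZipMagic(magic)) {
    return "zip";
  }
  if (art::IsDexMagic(magic)) {
    return "dex";
  }
  return "unknown";
}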
diff --git a/runtime/base/file_magic.h b/runtime/base/file_magic.h
new file mode 100644
index 0000000..f7e4bad
--- /dev/null
+++ b/runtime/base/file_magic.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_BASE_FILE_MAGIC_H_
+#define ART_RUNTIME_BASE_FILE_MAGIC_H_
+
+#include <stdint.h>
+#include <string>
+
+#include "ScopedFd.h"
+
+namespace art {
+
+// Open the file and read its magic number, leaving the fd rewound to the start of the file.
+ScopedFd OpenAndReadMagic(const char* filename, uint32_t* magic, std::string* error_msg);
+
+// Check whether the given magic matches a known file type.
+bool IsZipMagic(uint32_t magic);
+bool IsDexMagic(uint32_t magic);
+
+}  // namespace art
+
+#endif  // ART_RUNTIME_BASE_FILE_MAGIC_H_
diff --git a/runtime/base/unix_file/fd_file.cc b/runtime/base/unix_file/fd_file.cc
index 07cadc4..78bc3d5 100644
--- a/runtime/base/unix_file/fd_file.cc
+++ b/runtime/base/unix_file/fd_file.cc
@@ -17,12 +17,22 @@
 #include "base/unix_file/fd_file.h"
 
 #include <errno.h>
+#include <limits>
 #include <sys/stat.h>
 #include <sys/types.h>
 #include <unistd.h>
 
 #include "base/logging.h"
 
+// Includes needed for FdFile::Copy().
+#ifdef __linux__
+#include <sys/sendfile.h>
+#else
+#include <algorithm>
+#include "base/stl_util.h"
+#include "globals.h"
+#endif
+
 namespace unix_file {
 
 FdFile::FdFile() : guard_state_(GuardState::kClosed), fd_(-1), auto_close_(true) {
@@ -222,6 +232,52 @@
   return true;
 }
 
+bool FdFile::Copy(FdFile* input_file, int64_t offset, int64_t size) {
+  off_t off = static_cast<off_t>(offset);
+  off_t sz = static_cast<off_t>(size);
+  if (offset < 0 || static_cast<int64_t>(off) != offset ||
+      size < 0 || static_cast<int64_t>(sz) != size ||
+      sz > std::numeric_limits<off_t>::max() - off) {
+    errno = EINVAL;
+    return false;
+  }
+  if (size == 0) {
+    return true;
+  }
+#ifdef __linux__
+  // Use sendfile(), which supports file-to-file copies since Linux kernel 2.6.33.
+  off_t end = off + sz;
+  while (off != end) {
+    int result = TEMP_FAILURE_RETRY(
+        sendfile(Fd(), input_file->Fd(), &off, end - off));
+    if (result == -1) {
+      return false;
+    }
+    // Ignore the number of bytes in `result`; sendfile() already updated `off`.
+  }
+#else
+  if (lseek(input_file->Fd(), off, SEEK_SET) != off) {
+    return false;
+  }
+  constexpr size_t kMaxBufferSize = 4 * ::art::kPageSize;
+  const size_t buffer_size = std::min<uint64_t>(size, kMaxBufferSize);
+  art::UniqueCPtr<void> buffer(malloc(buffer_size));
+  if (buffer == nullptr) {
+    errno = ENOMEM;
+    return false;
+  }
+  while (size != 0) {
+    size_t chunk_size = std::min<uint64_t>(buffer_size, size);
+    if (!input_file->ReadFully(buffer.get(), chunk_size) ||
+        !WriteFully(buffer.get(), chunk_size)) {
+      return false;
+    }
+    size -= chunk_size;
+  }
+#endif
+  return true;
+}
+
 void FdFile::Erase() {
   TEMP_FAILURE_RETRY(SetLength(0));
   TEMP_FAILURE_RETRY(Flush());
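As a usage sketch (hypothetical helper, not part of the patch): Copy() is called on the destination file, reads `size` bytes of `input_file` starting at `offset`, and writes them at the destination's current position, so duplicating a prefix of one file into another looks like this:

#include "base/unix_file/fd_file.h"

// Copy the first n bytes of `in` into `out` and flush the result.
bool DuplicatePrefix(unix_file::FdFile* in, unix_file::FdFile* out, int64_t n) {
  if (!out->Copy(in, /* offset */ 0, /* size */ n)) {
    return false;  // errno holds the cause (EINVAL, ENOMEM, or an I/O error).
  }
  return out->Flush() == 0;
}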
diff --git a/runtime/base/unix_file/fd_file.h b/runtime/base/unix_file/fd_file.h
index f47368b..231a1ab 100644
--- a/runtime/base/unix_file/fd_file.h
+++ b/runtime/base/unix_file/fd_file.h
@@ -50,12 +50,12 @@
   bool Open(const std::string& file_path, int flags, mode_t mode);
 
   // RandomAccessFile API.
-  virtual int Close() WARN_UNUSED;
-  virtual int64_t Read(char* buf, int64_t byte_count, int64_t offset) const WARN_UNUSED;
-  virtual int SetLength(int64_t new_length) WARN_UNUSED;
-  virtual int64_t GetLength() const;
-  virtual int64_t Write(const char* buf, int64_t byte_count, int64_t offset) WARN_UNUSED;
-  virtual int Flush() WARN_UNUSED;
+  int Close() OVERRIDE WARN_UNUSED;
+  int64_t Read(char* buf, int64_t byte_count, int64_t offset) const OVERRIDE WARN_UNUSED;
+  int SetLength(int64_t new_length) OVERRIDE WARN_UNUSED;
+  int64_t GetLength() const OVERRIDE;
+  int64_t Write(const char* buf, int64_t byte_count, int64_t offset) OVERRIDE WARN_UNUSED;
+  int Flush() OVERRIDE WARN_UNUSED;
 
   // Short for SetLength(0); Flush(); Close();
   void Erase();
@@ -77,6 +77,9 @@
   bool PreadFully(void* buffer, size_t byte_count, size_t offset) WARN_UNUSED;
   bool WriteFully(const void* buffer, size_t byte_count) WARN_UNUSED;
 
+  // Copy `size` bytes from `input_file`, starting at `offset`, to this file's current position.
+  bool Copy(FdFile* input_file, int64_t offset, int64_t size);
+
   // This enum is public so that we can define the << operator over it.
   enum class GuardState {
     kBase,           // Base, file has not been flushed or closed.
diff --git a/runtime/base/unix_file/fd_file_test.cc b/runtime/base/unix_file/fd_file_test.cc
index 388f717..ecf607c 100644
--- a/runtime/base/unix_file/fd_file_test.cc
+++ b/runtime/base/unix_file/fd_file_test.cc
@@ -110,4 +110,34 @@
   ASSERT_EQ(file.Close(), 0);
 }
 
+TEST_F(FdFileTest, Copy) {
+  art::ScratchFile src_tmp;
+  FdFile src;
+  ASSERT_TRUE(src.Open(src_tmp.GetFilename(), O_RDWR));
+  ASSERT_GE(src.Fd(), 0);
+  ASSERT_TRUE(src.IsOpened());
+
+  char src_data[] = "Some test data.";
+  ASSERT_TRUE(src.WriteFully(src_data, sizeof(src_data)));  // Including the zero terminator.
+  ASSERT_EQ(0, src.Flush());
+  ASSERT_EQ(static_cast<int64_t>(sizeof(src_data)), src.GetLength());
+
+  art::ScratchFile dest_tmp;
+  FdFile dest;
+  ASSERT_TRUE(dest.Open(dest_tmp.GetFilename(), O_RDWR));
+  ASSERT_GE(dest.Fd(), 0);
+  ASSERT_TRUE(dest.IsOpened());
+
+  ASSERT_TRUE(dest.Copy(&src, 0, sizeof(src_data)));
+  ASSERT_EQ(0, dest.Flush());
+  ASSERT_EQ(static_cast<int64_t>(sizeof(src_data)), dest.GetLength());
+
+  char check_data[sizeof(src_data)];
+  ASSERT_TRUE(dest.PreadFully(check_data, sizeof(src_data), 0u));
+  CHECK_EQ(0, memcmp(check_data, src_data, sizeof(src_data)));
+
+  ASSERT_EQ(0, dest.Close());
+  ASSERT_EQ(0, src.Close());
+}
+
 }  // namespace unix_file
diff --git a/runtime/class_linker-inl.h b/runtime/class_linker-inl.h
index 88a3996..a5d10b2 100644
--- a/runtime/class_linker-inl.h
+++ b/runtime/class_linker-inl.h
@@ -116,6 +116,7 @@
   return resolved_method;
 }
 
+template <ClassLinker::ResolveMode kResolveMode>
 inline ArtMethod* ClassLinker::ResolveMethod(Thread* self,
                                              uint32_t method_idx,
                                              ArtMethod* referrer,
@@ -127,12 +128,12 @@
     Handle<mirror::DexCache> h_dex_cache(hs.NewHandle(declaring_class->GetDexCache()));
     Handle<mirror::ClassLoader> h_class_loader(hs.NewHandle(declaring_class->GetClassLoader()));
     const DexFile* dex_file = h_dex_cache->GetDexFile();
-    resolved_method = ResolveMethod(*dex_file,
-                                    method_idx,
-                                    h_dex_cache,
-                                    h_class_loader,
-                                    referrer,
-                                    type);
+    resolved_method = ResolveMethod<kResolveMode>(*dex_file,
+                                                  method_idx,
+                                                  h_dex_cache,
+                                                  h_class_loader,
+                                                  referrer,
+                                                  type);
   }
   // Note: We cannot check here to see whether we added the method to the cache. It
   //       might be an erroneous class, which results in it being hidden from us.
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index 879544f..0a37f26 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -6160,6 +6160,7 @@
   return resolved;
 }
 
+template <ClassLinker::ResolveMode kResolveMode>
 ArtMethod* ClassLinker::ResolveMethod(const DexFile& dex_file,
                                       uint32_t method_idx,
                                       Handle<mirror::DexCache> dex_cache,
@@ -6171,6 +6172,12 @@
   ArtMethod* resolved = dex_cache->GetResolvedMethod(method_idx, image_pointer_size_);
   if (resolved != nullptr && !resolved->IsRuntimeMethod()) {
     DCHECK(resolved->GetDeclaringClassUnchecked() != nullptr) << resolved->GetDexMethodIndex();
+    if (kResolveMode == ClassLinker::kForceICCECheck) {
+      if (resolved->CheckIncompatibleClassChange(type)) {
+        ThrowIncompatibleClassChangeError(type, resolved->GetInvokeType(), resolved, referrer);
+        return nullptr;
+      }
+    }
     return resolved;
   }
   // Fail, get the declaring class.
@@ -6189,8 +6196,36 @@
       DCHECK(resolved == nullptr || resolved->GetDeclaringClassUnchecked() != nullptr);
       break;
     case kInterface:
-      resolved = klass->FindInterfaceMethod(dex_cache.Get(), method_idx, image_pointer_size_);
-      DCHECK(resolved == nullptr || resolved->GetDeclaringClass()->IsInterface());
+      // We have to check whether the method id really belongs to an interface (dex static bytecode
+      // constraint A15). Otherwise you must not invoke-interface on it.
+      //
+      // This is not symmetric to A12-A14 (direct, static, virtual), as using FindInterfaceMethod
+      // assumes that the given type is an interface, and will check the interface table if the
+      // method isn't declared in the class. So it may find an interface method (usually by name
+      // in the handling below, but we do the constraint check early). In that case,
+      // CheckIncompatibleClassChange will succeed (as it is called on an interface method)
+      // unexpectedly.
+      // Example:
+      //    interface I {
+      //      foo()
+      //    }
+      //    class A implements I {
+      //      ...
+      //    }
+      //    class B extends A {
+      //      ...
+      //    }
+      //    invoke-interface B.foo
+      //      -> FindInterfaceMethod finds I.foo (interface method), not A.foo (miranda method)
+      if (UNLIKELY(!klass->IsInterface())) {
+        ThrowIncompatibleClassChangeError(klass,
+                                          "Found class %s, but interface was expected",
+                                          PrettyDescriptor(klass).c_str());
+        return nullptr;
+      } else {
+        resolved = klass->FindInterfaceMethod(dex_cache.Get(), method_idx, image_pointer_size_);
+        DCHECK(resolved == nullptr || resolved->GetDeclaringClass()->IsInterface());
+      }
       break;
     case kSuper:  // Fall-through.
     case kVirtual:
@@ -6792,4 +6827,20 @@
   }
 }
 
+// Instantiate ResolveMethod.
+template ArtMethod* ClassLinker::ResolveMethod<ClassLinker::kForceICCECheck>(
+    const DexFile& dex_file,
+    uint32_t method_idx,
+    Handle<mirror::DexCache> dex_cache,
+    Handle<mirror::ClassLoader> class_loader,
+    ArtMethod* referrer,
+    InvokeType type);
+template ArtMethod* ClassLinker::ResolveMethod<ClassLinker::kNoICCECheckForCache>(
+    const DexFile& dex_file,
+    uint32_t method_idx,
+    Handle<mirror::DexCache> dex_cache,
+    Handle<mirror::ClassLoader> class_loader,
+    ArtMethod* referrer,
+    InvokeType type);
+
 }  // namespace art
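The two `template ArtMethod* ClassLinker::ResolveMethod<...>` lines above are explicit instantiations: the template body lives in this .cc file, so the linker needs a concrete symbol emitted for each mode that other translation units may request. A stand-alone sketch of the pattern, with all names hypothetical:

// In the header: declare the template without defining it.
enum Mode { kChecked, kUnchecked };
template <Mode kMode> int Resolve(int idx);

// In exactly one .cc file: define the template...
template <Mode kMode>
int Resolve(int idx) {
  if (kMode == kChecked && idx < 0) {
    return -1;  // The checked mode rejects invalid indices.
  }
  return idx;
}

// ...and explicitly instantiate every mode used elsewhere, so the linker can
// resolve Resolve<kChecked> and Resolve<kUnchecked> in other translation units.
template int Resolve<kChecked>(int idx);
template int Resolve<kUnchecked>(int idx);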
diff --git a/runtime/class_linker.h b/runtime/class_linker.h
index 29aac31..0d3bc1e 100644
--- a/runtime/class_linker.h
+++ b/runtime/class_linker.h
@@ -246,11 +246,19 @@
       SHARED_REQUIRES(Locks::mutator_lock_)
       REQUIRES(!dex_lock_, !Roles::uninterruptible_);
 
+  // Determine whether a dex cache result should be trusted, or an IncompatibleClassChangeError
+  // check should be performed even after a hit.
+  enum ResolveMode {  // private.
+    kNoICCECheckForCache,
+    kForceICCECheck
+  };
+
   // Resolve a method with a given ID from the DexFile, storing the
   // result in DexCache. The ClassLinker and ClassLoader are used as
   // in ResolveType. What is unique is the method type argument which
   // is used to determine if this method is a direct, static, or
   // virtual method.
+  template <ResolveMode kResolveMode>
   ArtMethod* ResolveMethod(const DexFile& dex_file,
                            uint32_t method_idx,
                            Handle<mirror::DexCache> dex_cache,
@@ -262,6 +270,7 @@
 
   ArtMethod* GetResolvedMethod(uint32_t method_idx, ArtMethod* referrer)
       SHARED_REQUIRES(Locks::mutator_lock_);
+  template <ResolveMode kResolveMode>
   ArtMethod* ResolveMethod(Thread* self, uint32_t method_idx, ArtMethod* referrer, InvokeType type)
       SHARED_REQUIRES(Locks::mutator_lock_)
       REQUIRES(!dex_lock_, !Roles::uninterruptible_);
diff --git a/runtime/dex_file.cc b/runtime/dex_file.cc
index 4163e2e..30d921a 100644
--- a/runtime/dex_file.cc
+++ b/runtime/dex_file.cc
@@ -29,6 +29,7 @@
 
 #include "art_field-inl.h"
 #include "art_method-inl.h"
+#include "base/file_magic.h"
 #include "base/hash_map.h"
 #include "base/logging.h"
 #include "base/stl_util.h"
@@ -62,26 +63,6 @@
 const uint8_t DexFile::kDexMagic[] = { 'd', 'e', 'x', '\n' };
 const uint8_t DexFile::kDexMagicVersion[] = { '0', '3', '5', '\0' };
 
-static int OpenAndReadMagic(const char* filename, uint32_t* magic, std::string* error_msg) {
-  CHECK(magic != nullptr);
-  ScopedFd fd(open(filename, O_RDONLY, 0));
-  if (fd.get() == -1) {
-    *error_msg = StringPrintf("Unable to open '%s' : %s", filename, strerror(errno));
-    return -1;
-  }
-  int n = TEMP_FAILURE_RETRY(read(fd.get(), magic, sizeof(*magic)));
-  if (n != sizeof(*magic)) {
-    *error_msg = StringPrintf("Failed to find magic in '%s'", filename);
-    return -1;
-  }
-  if (lseek(fd.get(), 0, SEEK_SET) != 0) {
-    *error_msg = StringPrintf("Failed to seek to beginning of file '%s' : %s", filename,
-                              strerror(errno));
-    return -1;
-  }
-  return fd.release();
-}
-
 bool DexFile::GetChecksum(const char* filename, uint32_t* checksum, std::string* error_msg) {
   CHECK(checksum != nullptr);
   uint32_t magic;
diff --git a/runtime/entrypoints/entrypoint_utils-inl.h b/runtime/entrypoints/entrypoint_utils-inl.h
index dccb1da..ba2fb94 100644
--- a/runtime/entrypoints/entrypoint_utils-inl.h
+++ b/runtime/entrypoints/entrypoint_utils-inl.h
@@ -68,7 +68,7 @@
     class_loader.Assign(caller->GetClassLoader());
   }
 
-  return class_linker->ResolveMethod(
+  return class_linker->ResolveMethod<ClassLinker::kNoICCECheckForCache>(
       *outer_method->GetDexFile(), method_index, dex_cache, class_loader, nullptr, invoke_type);
 }
 
@@ -401,7 +401,10 @@
     mirror::Object* null_this = nullptr;
     HandleWrapper<mirror::Object> h_this(
         hs.NewHandleWrapper(type == kStatic ? &null_this : this_object));
-    resolved_method = class_linker->ResolveMethod(self, method_idx, referrer, type);
+    constexpr ClassLinker::ResolveMode resolve_mode =
+        access_check ? ClassLinker::kForceICCECheck
+                     : ClassLinker::kNoICCECheckForCache;
+    resolved_method = class_linker->ResolveMethod<resolve_mode>(self, method_idx, referrer, type);
   }
   if (UNLIKELY(resolved_method == nullptr)) {
     DCHECK(self->IsExceptionPending());  // Throw exception and unwind.
diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
index 2c8ed88..08c9b49 100644
--- a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
@@ -1015,7 +1015,8 @@
     HandleWrapper<mirror::Object> h_receiver(
         hs.NewHandleWrapper(virtual_or_interface ? &receiver : &dummy));
     DCHECK_EQ(caller->GetDexFile(), called_method.dex_file);
-    called = linker->ResolveMethod(self, called_method.dex_method_index, caller, invoke_type);
+    called = linker->ResolveMethod<ClassLinker::kForceICCECheck>(
+        self, called_method.dex_method_index, caller, invoke_type);
   }
   const void* code = nullptr;
   if (LIKELY(!self->IsExceptionPending())) {
diff --git a/runtime/indirect_reference_table.h b/runtime/indirect_reference_table.h
index d13526b..2d0ae63 100644
--- a/runtime/indirect_reference_table.h
+++ b/runtime/indirect_reference_table.h
@@ -344,8 +344,11 @@
     segment_state_.all = new_state;
   }
 
-  static Offset SegmentStateOffset() {
-    return Offset(OFFSETOF_MEMBER(IndirectReferenceTable, segment_state_));
+  static Offset SegmentStateOffset(size_t pointer_size ATTRIBUTE_UNUSED) {
+    // Note: Currently segment_state_ is at offset 0. We're testing the expected value in
+    //       jni_internal_test to make sure it stays correct. We don't use OFFSETOF_MEMBER, as
+    //       that is not pointer-size-safe.
+    return Offset(0);
   }
 
   // Release pages past the end of the table that may have previously held references.
diff --git a/runtime/jni_env_ext.cc b/runtime/jni_env_ext.cc
index dab1040..aa25f67 100644
--- a/runtime/jni_env_ext.cc
+++ b/runtime/jni_env_ext.cc
@@ -105,9 +105,32 @@
   stacked_local_ref_cookies.pop_back();
 }
 
-Offset JNIEnvExt::SegmentStateOffset() {
-  return Offset(OFFSETOF_MEMBER(JNIEnvExt, locals) +
-                IndirectReferenceTable::SegmentStateOffset().Int32Value());
+// Note: the offset code is brittle, as we can't use OFFSETOF_MEMBER or offsetof easily. Thus, there
+//       are tests in jni_internal_test to match the results against the actual values.
+
+// This encodes knowledge of the structure and layout of the JNIEnv fields.
+static size_t JNIEnvSize(size_t pointer_size) {
+  // A single pointer.
+  return pointer_size;
+}
+
+Offset JNIEnvExt::SegmentStateOffset(size_t pointer_size) {
+  size_t locals_offset = JNIEnvSize(pointer_size) +
+                         2 * pointer_size +          // Thread* self + JavaVMExt* vm.
+                         4 +                         // local_ref_cookie.
+                         (pointer_size - 4);         // Padding.
+  size_t irt_segment_state_offset =
+      IndirectReferenceTable::SegmentStateOffset(pointer_size).Int32Value();
+  return Offset(locals_offset + irt_segment_state_offset);
+}
+
+Offset JNIEnvExt::LocalRefCookieOffset(size_t pointer_size) {
+  return Offset(JNIEnvSize(pointer_size) +
+                2 * pointer_size);          // Thread* self + JavaVMExt* vm
+}
+
+Offset JNIEnvExt::SelfOffset(size_t pointer_size) {
+  return Offset(JNIEnvSize(pointer_size));
 }
 
 // Use some defining part of the caller's frame as the identifying mark for the JNI segment.
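Plugging the two supported pointer sizes into the formulas above gives concrete values (a hand check, assuming the stated field order self, vm, local_ref_cookie, locals, and the IRT's segment_state_ at offset 0):

#include <cassert>
#include <cstddef>

// Stand-alone recomputation of the offsets above (illustration only).
static size_t JNIEnvSize(size_t ps) { return ps; }  // A single function-table pointer.
static size_t SelfOffset(size_t ps) { return JNIEnvSize(ps); }
static size_t LocalRefCookieOffset(size_t ps) { return JNIEnvSize(ps) + 2 * ps; }
static size_t SegmentStateOffset(size_t ps) {
  return JNIEnvSize(ps) + 2 * ps + 4 + (ps - 4);  // locals_offset + IRT offset 0.
}

int main() {
  assert(SelfOffset(4) == 4 && SelfOffset(8) == 8);
  assert(LocalRefCookieOffset(4) == 12 && LocalRefCookieOffset(8) == 24);
  assert(SegmentStateOffset(4) == 16 && SegmentStateOffset(8) == 32);
  return 0;
}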
diff --git a/runtime/jni_env_ext.h b/runtime/jni_env_ext.h
index 3828ff0..2f8decf 100644
--- a/runtime/jni_env_ext.h
+++ b/runtime/jni_env_ext.h
@@ -50,15 +50,9 @@
   T AddLocalReference(mirror::Object* obj)
       SHARED_REQUIRES(Locks::mutator_lock_);
 
-  static Offset SegmentStateOffset();
-
-  static Offset LocalRefCookieOffset() {
-    return Offset(OFFSETOF_MEMBER(JNIEnvExt, local_ref_cookie));
-  }
-
-  static Offset SelfOffset() {
-    return Offset(OFFSETOF_MEMBER(JNIEnvExt, self));
-  }
+  static Offset SegmentStateOffset(size_t pointer_size);
+  static Offset LocalRefCookieOffset(size_t pointer_size);
+  static Offset SelfOffset(size_t pointer_size);
 
   jobject NewLocalRef(mirror::Object* obj) SHARED_REQUIRES(Locks::mutator_lock_);
   void DeleteLocalRef(jobject obj) SHARED_REQUIRES(Locks::mutator_lock_);
diff --git a/runtime/jni_internal_test.cc b/runtime/jni_internal_test.cc
index d1687d7..b41d16b 100644
--- a/runtime/jni_internal_test.cc
+++ b/runtime/jni_internal_test.cc
@@ -18,7 +18,9 @@
 
 #include "art_method-inl.h"
 #include "common_compiler_test.h"
+#include "indirect_reference_table.h"
 #include "java_vm_ext.h"
+#include "jni_env_ext.h"
 #include "mirror/string-inl.h"
 #include "scoped_thread_state_change.h"
 #include "ScopedLocalRef.h"
@@ -2261,4 +2263,41 @@
   env_->DeleteGlobalRef(global_ref);
 }
 
+// Test the IndirectReferenceTable offset computation. b/26071368.
+TEST_F(JniInternalTest, IndirectReferenceTableOffsets) {
+  // The segment_state_ field is private, and we want to avoid a friend declaration. So we'll check
+  // by modifying memory.
+  // The parameters don't really matter here.
+  IndirectReferenceTable irt(5, 5, IndirectRefKind::kGlobal, true);
+  uint32_t old_state = irt.GetSegmentState();
+
+  // Write some new state directly. We invert parts of old_state to ensure a new value.
+  uint32_t new_state = old_state ^ 0x07705005;
+  ASSERT_NE(old_state, new_state);
+
+  uint8_t* base = reinterpret_cast<uint8_t*>(&irt);
+  int32_t segment_state_offset =
+      IndirectReferenceTable::SegmentStateOffset(sizeof(void*)).Int32Value();
+  *reinterpret_cast<uint32_t*>(base + segment_state_offset) = new_state;
+
+  // Read and compare.
+  EXPECT_EQ(new_state, irt.GetSegmentState());
+}
+
+// Test the JNIEnvExt offset computation. b/26071368.
+TEST_F(JniInternalTest, JNIEnvExtOffsets) {
+  EXPECT_EQ(OFFSETOF_MEMBER(JNIEnvExt, local_ref_cookie),
+            JNIEnvExt::LocalRefCookieOffset(sizeof(void*)).Int32Value());
+
+  EXPECT_EQ(OFFSETOF_MEMBER(JNIEnvExt, self), JNIEnvExt::SelfOffset(sizeof(void*)).Int32Value());
+
+  // segment_state_ is private in the IndirectReferenceTable, so this test isn't as strong as
+  // we would like it to be.
+  int32_t segment_state_now =
+      OFFSETOF_MEMBER(JNIEnvExt, locals) +
+      IndirectReferenceTable::SegmentStateOffset(sizeof(void*)).Int32Value();
+  int32_t segment_state_computed = JNIEnvExt::SegmentStateOffset(sizeof(void*)).Int32Value();
+  EXPECT_EQ(segment_state_now, segment_state_computed);
+}
+
 }  // namespace art
diff --git a/runtime/utils.cc b/runtime/utils.cc
index 68db7e3..eddc3a4 100644
--- a/runtime/utils.cc
+++ b/runtime/utils.cc
@@ -1392,21 +1392,6 @@
   return filename;
 }
 
-bool IsZipMagic(uint32_t magic) {
-  return (('P' == ((magic >> 0) & 0xff)) &&
-          ('K' == ((magic >> 8) & 0xff)));
-}
-
-bool IsDexMagic(uint32_t magic) {
-  return DexFile::IsMagicValid(reinterpret_cast<const uint8_t*>(&magic));
-}
-
-bool IsOatMagic(uint32_t magic) {
-  return (memcmp(reinterpret_cast<const uint8_t*>(magic),
-                 OatHeader::kOatMagic,
-                 sizeof(OatHeader::kOatMagic)) == 0);
-}
-
 bool Exec(std::vector<std::string>& arg_vector, std::string* error_msg) {
   const std::string command_line(Join(arg_vector, ' '));
 
diff --git a/runtime/utils.h b/runtime/utils.h
index 8b7941a..5b9e963 100644
--- a/runtime/utils.h
+++ b/runtime/utils.h
@@ -273,11 +273,6 @@
 // Returns the system location for an image
 std::string GetSystemImageFilename(const char* location, InstructionSet isa);
 
-// Check whether the given magic matches a known file type.
-bool IsZipMagic(uint32_t magic);
-bool IsDexMagic(uint32_t magic);
-bool IsOatMagic(uint32_t magic);
-
 // Wrapper on fork/execv to run a command in a subprocess.
 bool Exec(std::vector<std::string>& arg_vector, std::string* error_msg);
 
diff --git a/runtime/verifier/method_verifier.cc b/runtime/verifier/method_verifier.cc
index 364b8ce..cf27ff2 100644
--- a/runtime/verifier/method_verifier.cc
+++ b/runtime/verifier/method_verifier.cc
@@ -192,7 +192,7 @@
     }
     previous_method_idx = method_idx;
     InvokeType type = it->GetMethodInvokeType(*class_def);
-    ArtMethod* method = linker->ResolveMethod(
+    ArtMethod* method = linker->ResolveMethod<ClassLinker::kNoICCECheckForCache>(
         *dex_file, method_idx, dex_cache, class_loader, nullptr, type);
     if (method == nullptr) {
       DCHECK(self->IsExceptionPending());
@@ -3638,6 +3638,30 @@
   const RegType& referrer = GetDeclaringClass();
   auto* cl = Runtime::Current()->GetClassLinker();
   auto pointer_size = cl->GetImagePointerSize();
+
+  // Check that the invoke type matches the class kind: interface classes only take
+  // invoke-interface, or static invokes when experimental default methods are enabled.
+  if (klass->IsInterface()) {
+    Runtime* runtime = Runtime::Current();
+    const bool default_methods_supported =
+        runtime == nullptr ||
+        runtime->AreExperimentalFlagsEnabled(ExperimentalFlags::kDefaultMethods);
+    if (method_type != METHOD_INTERFACE &&
+        (!default_methods_supported || method_type != METHOD_STATIC)) {
+      Fail(VERIFY_ERROR_CLASS_CHANGE)
+          << "non-interface method " << PrettyMethod(dex_method_idx, *dex_file_)
+          << " is in an interface class " << PrettyClass(klass);
+      return nullptr;
+    }
+  } else {
+    if (method_type == METHOD_INTERFACE) {
+      Fail(VERIFY_ERROR_CLASS_CHANGE)
+          << "interface method " << PrettyMethod(dex_method_idx, *dex_file_)
+          << " is in a non-interface class " << PrettyClass(klass);
+      return nullptr;
+    }
+  }
+
   ArtMethod* res_method = dex_cache_->GetResolvedMethod(dex_method_idx, pointer_size);
   if (res_method == nullptr) {
     const char* name = dex_file_->GetMethodName(method_id);
@@ -3692,23 +3716,6 @@
                                       << PrettyMethod(res_method);
     return nullptr;
   }
-  // Check that interface methods are static or match interface classes.
-  // We only allow statics if we don't have default methods enabled.
-  Runtime* runtime = Runtime::Current();
-  const bool default_methods_supported =
-      runtime == nullptr ||
-      runtime->AreExperimentalFlagsEnabled(ExperimentalFlags::kDefaultMethods);
-  if (klass->IsInterface() &&
-      method_type != METHOD_INTERFACE &&
-      (!default_methods_supported || method_type != METHOD_STATIC)) {
-    Fail(VERIFY_ERROR_CLASS_CHANGE) << "non-interface method " << PrettyMethod(res_method)
-                                    << " is in an interface class " << PrettyClass(klass);
-    return nullptr;
-  } else if (!klass->IsInterface() && method_type == METHOD_INTERFACE) {
-    Fail(VERIFY_ERROR_CLASS_CHANGE) << "interface method " << PrettyMethod(res_method)
-                                    << " is in a non-interface class " << PrettyClass(klass);
-    return nullptr;
-  }
   // See if the method type implied by the invoke instruction matches the access flags for the
   // target method.
   if ((method_type == METHOD_DIRECT && (!res_method->IsDirect() || res_method->IsStatic())) ||
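Condensed, the moved check accepts exactly these combinations (a distilled predicate with a hypothetical name, not code from the patch):

// Mirrors the VERIFY_ERROR_CLASS_CHANGE conditions in the hunk above.
static bool InvokeKindMatchesClassKind(bool is_interface_class,
                                       bool is_interface_invoke,
                                       bool is_static_invoke,
                                       bool default_methods_enabled) {
  if (is_interface_class) {
    // Interface classes accept invoke-interface, plus static invokes when the
    // experimental default-method support is enabled.
    return is_interface_invoke || (is_static_invoke && default_methods_enabled);
  }
  // Non-interface classes accept everything except invoke-interface.
  return !is_interface_invoke;
}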
diff --git a/test/800-smali/expected.txt b/test/800-smali/expected.txt
index a590cf1..ebefeea 100644
--- a/test/800-smali/expected.txt
+++ b/test/800-smali/expected.txt
@@ -47,4 +47,5 @@
 b/23502994 (if-eqz)
 b/23502994 (check-cast)
 b/25494456
+b/21869691
 Done!
diff --git a/test/800-smali/smali/b_21869691A.smali b/test/800-smali/smali/b_21869691A.smali
new file mode 100644
index 0000000..a7a6ef4
--- /dev/null
+++ b/test/800-smali/smali/b_21869691A.smali
@@ -0,0 +1,47 @@
+# Test that the verifier does not stash methods incorrectly because they are being invoked with
+# the wrong opcode.
+#
+# When using invoke-interface on a method id that is not from an interface class, we should throw
+# an IncompatibleClassChangeError. FindInterfaceMethod assumes that the given type is an interface,
+# so we can construct a class hierarchy that would have a surprising result:
+#
+#   interface I {
+#     void a();
+#   }
+#
+#   class B implements I {
+#      // miranda method for a, or a implemented.
+#   }
+#
+#   class C extends B {
+#   }
+#
+# Then calling invoke-interface C.a() will go wrong if there is no explicit check: a can't be found
+# in C itself, but it is found in the interface table, so we will find an interface method and pass ICCE checks.
+#
+# If we do this before a correct invoke-virtual C.a(), we poison the dex cache with an incorrect
+# method. In this test, this is done in A (A < B, so processed first). The "real" call is in B.
+
+.class public LB21869691A;
+
+.super Ljava/lang/Object;
+
+.method public constructor <init>()V
+    .registers 1
+    invoke-direct {p0}, Ljava/lang/Object;-><init>()V
+    return-void
+.end method
+
+.method public run()V
+  .registers 3
+  new-instance v0, LB21869691C;
+  invoke-direct {v0}, LB21869691C;-><init>()V
+  invoke-virtual {v2, v0}, LB21869691A;->callinf(LB21869691C;)V
+  return-void
+.end method
+
+.method public callinf(LB21869691C;)V
+  .registers 2
+  invoke-interface {p1}, LB21869691C;->a()V
+  return-void
+.end method
diff --git a/test/800-smali/smali/b_21869691B.smali b/test/800-smali/smali/b_21869691B.smali
new file mode 100644
index 0000000..1172bdb
--- /dev/null
+++ b/test/800-smali/smali/b_21869691B.smali
@@ -0,0 +1,33 @@
+# Test that the verifier does not stash methods incorrectly because they are being invoked with
+# the wrong opcode. See b_21869691A.smali for explanation.
+
+.class public abstract LB21869691B;
+
+.super Ljava/lang/Object;
+.implements LB21869691I;
+
+.method protected constructor <init>()V
+    .registers 1
+    invoke-direct {p0}, Ljava/lang/Object;-><init>()V
+    return-void
+.end method
+
+# Have an implementation for the interface method.
+.method public a()V
+  .registers 1
+  return-void
+.end method
+
+# Call ourself with invoke-virtual.
+.method public callB()V
+  .registers 1
+  invoke-virtual {p0}, LB21869691B;->a()V
+  return-void
+.end method
+
+# Call C with invoke-virtual.
+.method public callB(LB21869691C;)V
+  .registers 2
+  invoke-virtual {p1}, LB21869691C;->a()V
+  return-void
+.end method
diff --git a/test/800-smali/smali/b_21869691C.smali b/test/800-smali/smali/b_21869691C.smali
new file mode 100644
index 0000000..4f89a04
--- /dev/null
+++ b/test/800-smali/smali/b_21869691C.smali
@@ -0,0 +1,12 @@
+# Test that the verifier does not stash methods incorrectly because they are being invoked with
+# the wrong opcode. See b_21869691A.smali for explanation.
+
+.class public LB21869691C;
+
+.super LB21869691B;
+
+.method public constructor <init>()V
+    .registers 1
+    invoke-direct {p0}, LB21869691B;-><init>()V
+    return-void
+.end method
diff --git a/test/800-smali/smali/b_21869691I.smali b/test/800-smali/smali/b_21869691I.smali
new file mode 100644
index 0000000..72a27dd
--- /dev/null
+++ b/test/800-smali/smali/b_21869691I.smali
@@ -0,0 +1,11 @@
+# Test that the verifier does not stash methods incorrectly because they are being invoked with
+# the wrong opcode.
+#
+# This is the interface class that has an "a" method.
+
+.class public abstract interface LB21869691I;
+
+.super Ljava/lang/Object;
+
+.method public abstract a()V
+.end method
diff --git a/test/800-smali/src/Main.java b/test/800-smali/src/Main.java
index 4844848..3b62a46 100644
--- a/test/800-smali/src/Main.java
+++ b/test/800-smali/src/Main.java
@@ -139,6 +139,8 @@
                 new Object[] { "abc" }, null, null));
         testCases.add(new TestCase("b/25494456", "B25494456", "run", null, new VerifyError(),
                 null));
+        testCases.add(new TestCase("b/21869691", "B21869691A", "run", null,
+                new IncompatibleClassChangeError(), null));
     }
 
     public void runTests() {
@@ -208,7 +210,7 @@
                                                         tc.expectedException.getClass().getName() +
                                                         ", but got " + exc.getClass(), exc);
             } else {
-              // Expected exception, do nothing.
+                // Expected exception, do nothing.
             }
         } finally {
             if (errorReturn != null) {