Merge "ART: Fix crash accessing .bss for non-executable oat file."
diff --git a/compiler/optimizing/instruction_simplifier_mips.h b/compiler/optimizing/instruction_simplifier_mips.h
index 22cc2ef..6cb8aff 100644
--- a/compiler/optimizing/instruction_simplifier_mips.h
+++ b/compiler/optimizing/instruction_simplifier_mips.h
@@ -30,7 +30,7 @@
 class InstructionSimplifierMips : public HOptimization {
  public:
   InstructionSimplifierMips(HGraph* graph, CodeGenerator* codegen, OptimizingCompilerStats* stats)
-      : HOptimization(graph, "instruction_simplifier_mips", stats),
+      : HOptimization(graph, kInstructionSimplifierMipsPassName, stats),
         codegen_(down_cast<CodeGeneratorMIPS*>(codegen)) {}
 
   static constexpr const char* kInstructionSimplifierMipsPassName = "instruction_simplifier_mips";
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index a281c4a..73c72fc 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -1196,7 +1196,69 @@
 
   Runtime* runtime = Runtime::Current();
   ArenaAllocator allocator(runtime->GetJitArenaPool());
-  ArenaStack arena_stack(Runtime::Current()->GetJitArenaPool());
+
+  if (UNLIKELY(method->IsNative())) {
+    JniCompiledMethod jni_compiled_method = ArtQuickJniCompileMethod(
+        GetCompilerDriver(), access_flags, method_idx, *dex_file);
+    ScopedNullHandle<mirror::ObjectArray<mirror::Object>> roots;
+    ArenaSet<ArtMethod*, std::less<ArtMethod*>> cha_single_implementation_list(
+        allocator.Adapter(kArenaAllocCHA));
+    const void* code = code_cache->CommitCode(
+        self,
+        method,
+        /* stack_map_data */ nullptr,
+        /* method_info_data */ nullptr,
+        /* roots_data */ nullptr,
+        jni_compiled_method.GetFrameSize(),
+        jni_compiled_method.GetCoreSpillMask(),
+        jni_compiled_method.GetFpSpillMask(),
+        jni_compiled_method.GetCode().data(),
+        jni_compiled_method.GetCode().size(),
+        /* data_size */ 0u,
+        osr,
+        roots,
+        /* has_should_deoptimize_flag */ false,
+        cha_single_implementation_list);
+    if (code == nullptr) {
+      return false;
+    }
+
+    const CompilerOptions& compiler_options = GetCompilerDriver()->GetCompilerOptions();
+    if (compiler_options.GetGenerateDebugInfo()) {
+      const auto* method_header = reinterpret_cast<const OatQuickMethodHeader*>(code);
+      const uintptr_t code_address = reinterpret_cast<uintptr_t>(method_header->GetCode());
+      debug::MethodDebugInfo info = {};
+      DCHECK(info.trampoline_name.empty());
+      info.dex_file = dex_file;
+      info.class_def_index = class_def_idx;
+      info.dex_method_index = method_idx;
+      info.access_flags = access_flags;
+      info.code_item = code_item;
+      info.isa = jni_compiled_method.GetInstructionSet();
+      info.deduped = false;
+      info.is_native_debuggable = compiler_options.GetNativeDebuggable();
+      info.is_optimized = true;
+      info.is_code_address_text_relative = false;
+      info.code_address = code_address;
+      info.code_size = jni_compiled_method.GetCode().size();
+      info.frame_size_in_bytes = method_header->GetFrameSizeInBytes();
+      info.code_info = nullptr;
+      info.cfi = jni_compiled_method.GetCfi();
+      std::vector<uint8_t> elf_file = debug::WriteDebugElfFileForMethods(
+          GetCompilerDriver()->GetInstructionSet(),
+          GetCompilerDriver()->GetInstructionSetFeatures(),
+          ArrayRef<const debug::MethodDebugInfo>(&info, 1));
+      CreateJITCodeEntryForAddress(code_address, std::move(elf_file));
+    }
+
+    Runtime::Current()->GetJit()->AddMemoryUsage(method, allocator.BytesUsed());
+    if (jit_logger != nullptr) {
+      jit_logger->WriteLog(code, jni_compiled_method.GetCode().size(), method);
+    }
+    return true;
+  }
+
+  ArenaStack arena_stack(runtime->GetJitArenaPool());
   CodeVectorAllocator code_allocator(&allocator);
   VariableSizedHandleScope handles(self);
 
diff --git a/runtime/arch/arm/quick_entrypoints_arm.S b/runtime/arch/arm/quick_entrypoints_arm.S
index 6ff8dd6..6ec9c48 100644
--- a/runtime/arch/arm/quick_entrypoints_arm.S
+++ b/runtime/arch/arm/quick_entrypoints_arm.S
@@ -1783,7 +1783,9 @@
     .cfi_adjust_cfa_offset FRAME_SIZE_SAVE_REFS_AND_ARGS-FRAME_SIZE_SAVE_REFS_ONLY
 
 .Lexception_in_native:
-    ldr sp, [r9, #THREAD_TOP_QUICK_FRAME_OFFSET]
+    ldr ip, [r9, #THREAD_TOP_QUICK_FRAME_OFFSET]
+    add ip, ip, #-1  // Remove the GenericJNI tag. ADD/SUB writing directly to SP is UNPREDICTABLE.
+    mov sp, ip
     .cfi_def_cfa_register sp
     # This will create a new save-all frame, required by the runtime.
     DELIVER_PENDING_EXCEPTION
diff --git a/runtime/arch/arm64/quick_entrypoints_arm64.S b/runtime/arch/arm64/quick_entrypoints_arm64.S
index 280e593..47efeb9 100644
--- a/runtime/arch/arm64/quick_entrypoints_arm64.S
+++ b/runtime/arch/arm64/quick_entrypoints_arm64.S
@@ -2299,7 +2299,7 @@
 .Lexception_in_native:
     // Move to x1 then sp to please assembler.
     ldr x1, [xSELF, # THREAD_TOP_QUICK_FRAME_OFFSET]
-    mov sp, x1
+    add sp, x1, #-1  // Remove the GenericJNI tag.
     .cfi_def_cfa_register sp
     # This will create a new save-all frame, required by the runtime.
     DELIVER_PENDING_EXCEPTION
diff --git a/runtime/arch/mips/quick_entrypoints_mips.S b/runtime/arch/mips/quick_entrypoints_mips.S
index 489c52c..fc77a64 100644
--- a/runtime/arch/mips/quick_entrypoints_mips.S
+++ b/runtime/arch/mips/quick_entrypoints_mips.S
@@ -2283,7 +2283,8 @@
     nop
 
 2:
-    lw      $sp, THREAD_TOP_QUICK_FRAME_OFFSET(rSELF)
+    lw      $t0, THREAD_TOP_QUICK_FRAME_OFFSET(rSELF)
+    addiu   $sp, $t0, -1  // Remove the GenericJNI tag.
     move    $gp, $s3               # restore $gp from $s3
     # This will create a new save-all frame, required by the runtime.
     DELIVER_PENDING_EXCEPTION
diff --git a/runtime/arch/mips64/quick_entrypoints_mips64.S b/runtime/arch/mips64/quick_entrypoints_mips64.S
index 98ffe65..3fb83d9 100644
--- a/runtime/arch/mips64/quick_entrypoints_mips64.S
+++ b/runtime/arch/mips64/quick_entrypoints_mips64.S
@@ -2158,7 +2158,8 @@
     dmtc1   $v0, $f0               # place return value to FP return value
 
 1:
-    ld      $sp, THREAD_TOP_QUICK_FRAME_OFFSET(rSELF)
+    ld      $t0, THREAD_TOP_QUICK_FRAME_OFFSET(rSELF)
+    daddiu  $sp, $t0, -1  // Remove the GenericJNI tag.
     # This will create a new save-all frame, required by the runtime.
     DELIVER_PENDING_EXCEPTION
 END art_quick_generic_jni_trampoline
diff --git a/runtime/arch/x86/quick_entrypoints_x86.S b/runtime/arch/x86/quick_entrypoints_x86.S
index 25716dc..a46ceeb 100644
--- a/runtime/arch/x86/quick_entrypoints_x86.S
+++ b/runtime/arch/x86/quick_entrypoints_x86.S
@@ -1969,7 +1969,9 @@
     punpckldq %xmm1, %xmm0
     ret
 .Lexception_in_native:
-    movl %fs:THREAD_TOP_QUICK_FRAME_OFFSET, %esp
+    pushl %fs:THREAD_TOP_QUICK_FRAME_OFFSET
+    addl LITERAL(-1), (%esp)  // Remove the GenericJNI tag.
+    movl (%esp), %esp
     // Do a call to push a new save-all frame required by the runtime.
     call .Lexception_call
 .Lexception_call:
diff --git a/runtime/arch/x86_64/quick_entrypoints_x86_64.S b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
index 2c3da90..463e5a2 100644
--- a/runtime/arch/x86_64/quick_entrypoints_x86_64.S
+++ b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
@@ -1958,7 +1958,9 @@
     movq %rax, %xmm0
     ret
 .Lexception_in_native:
-    movq %gs:THREAD_TOP_QUICK_FRAME_OFFSET, %rsp
+    pushq %gs:THREAD_TOP_QUICK_FRAME_OFFSET
+    addq LITERAL(-1), (%rsp)  // Remove the GenericJNI tag.
+    movq (%rsp), %rsp
     CFI_DEF_CFA_REGISTER(rsp)
     // Do a call to push a new save-all frame required by the runtime.
     call .Lexception_call
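All six entrypoint changes above perform the same untagging step: the GenericJNI trampoline now stores the top quick frame with bit 0 set (see the ManagedStack changes below), and since that frame pointer is at least 4-byte aligned, subtracting 1 from the tagged value is equivalent to clearing the tag bit. A minimal C++ sketch of the arithmetic, with illustrative helper names that are not part of the patch:

    #include <cassert>
    #include <cstdint>

    // Illustrative only: mirrors the tagging scheme of TaggedTopQuickFrame.
    uintptr_t TagGenericJniFrame(uintptr_t sp) {
      assert((sp & 3u) == 0u);  // The frame is at least 4-byte aligned, so bit 0 is free.
      return sp | 1u;           // Mark the top quick frame as a GenericJNI frame.
    }

    uintptr_t RemoveGenericJniTag(uintptr_t tagged_sp) {
      // Bit 0 of the tagged value is known to be 1, so subtracting 1 clears it.
      // This is what the entrypoints above do, e.g. `add sp, x1, #-1` on arm64.
      return tagged_sp - 1u;
    }

    int main() {
      uintptr_t sp = 0x7ffc1230u;  // any 4-byte aligned stack pointer value
      assert(RemoveGenericJniTag(TagGenericJniFrame(sp)) == sp);
      return 0;
    }
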
diff --git a/runtime/art_method-inl.h b/runtime/art_method-inl.h
index 50913de..31abf94 100644
--- a/runtime/art_method-inl.h
+++ b/runtime/art_method-inl.h
@@ -95,10 +95,12 @@
   return method_index_;
 }
 
+template <ReadBarrierOption kReadBarrierOption>
 inline uint32_t ArtMethod::GetDexMethodIndex() {
   if (kCheckDeclaringClassState) {
-    CHECK(IsRuntimeMethod() || GetDeclaringClass()->IsIdxLoaded() ||
-          GetDeclaringClass()->IsErroneous());
+    CHECK(IsRuntimeMethod() ||
+          GetDeclaringClass<kReadBarrierOption>()->IsIdxLoaded() ||
+          GetDeclaringClass<kReadBarrierOption>()->IsErroneous());
   }
   return GetDexMethodIndexUnchecked();
 }
@@ -202,7 +204,14 @@
 inline const char* ArtMethod::GetShorty(uint32_t* out_length) {
   DCHECK(!IsProxyMethod());
   const DexFile* dex_file = GetDexFile();
-  return dex_file->GetMethodShorty(dex_file->GetMethodId(GetDexMethodIndex()), out_length);
+  // Don't do a read barrier in the DCHECK() inside GetDexMethodIndex() as GetShorty()
+  // can be called when the declaring class is about to be unloaded and cannot be added
+  // to the mark stack (subsequent GC assertion would fail).
+  // It is safe to avoid the read barrier as the ArtMethod is constructed with a declaring
+  // Class already satisfying the DCHECK() inside GetDexMethodIndex(), so even if that copy
+  // of declaring class becomes a from-space object, it shall satisfy the DCHECK().
+  return dex_file->GetMethodShorty(dex_file->GetMethodId(GetDexMethodIndex<kWithoutReadBarrier>()),
+                                   out_length);
 }
 
 inline const Signature ArtMethod::GetSignature() {
@@ -319,7 +328,7 @@
 
 template <ReadBarrierOption kReadBarrierOption>
 inline mirror::DexCache* ArtMethod::GetDexCache() {
-  if (LIKELY(!IsObsolete())) {
+  if (LIKELY(!IsObsolete<kReadBarrierOption>())) {
     mirror::Class* klass = GetDeclaringClass<kReadBarrierOption>();
     return klass->GetDexCache<kDefaultVerifyFlags, kReadBarrierOption>();
   } else {
diff --git a/runtime/art_method.cc b/runtime/art_method.cc
index fa0c501..43a5139 100644
--- a/runtime/art_method.cc
+++ b/runtime/art_method.cc
@@ -587,11 +587,6 @@
   CHECK(existing_entry_point != nullptr) << PrettyMethod() << "@" << this;
   ClassLinker* class_linker = runtime->GetClassLinker();
 
-  if (class_linker->IsQuickGenericJniStub(existing_entry_point)) {
-    // The generic JNI does not have any method header.
-    return nullptr;
-  }
-
   if (existing_entry_point == GetQuickProxyInvokeHandler()) {
     DCHECK(IsProxyMethod() && !IsConstructor());
     // The proxy entry point does not have any method header.
@@ -599,7 +594,8 @@
   }
 
   // Check whether the current entry point contains this pc.
-  if (!class_linker->IsQuickResolutionStub(existing_entry_point) &&
+  if (!class_linker->IsQuickGenericJniStub(existing_entry_point) &&
+      !class_linker->IsQuickResolutionStub(existing_entry_point) &&
       !class_linker->IsQuickToInterpreterBridge(existing_entry_point)) {
     OatQuickMethodHeader* method_header =
         OatQuickMethodHeader::FromEntryPoint(existing_entry_point);
@@ -632,19 +628,13 @@
   OatFile::OatMethod oat_method =
       FindOatMethodFor(this, class_linker->GetImagePointerSize(), &found);
   if (!found) {
-    if (class_linker->IsQuickResolutionStub(existing_entry_point)) {
-      // We are running the generic jni stub, but the entry point of the method has not
-      // been updated yet.
-      DCHECK_EQ(pc, 0u) << "Should be a downcall";
-      DCHECK(IsNative());
-      return nullptr;
-    }
-    if (existing_entry_point == GetQuickInstrumentationEntryPoint()) {
-      // We are running the generic jni stub, but the method is being instrumented.
-      // NB We would normally expect the pc to be zero but we can have non-zero pc's if
-      // instrumentation is installed or removed during the call which is using the generic jni
-      // trampoline.
-      DCHECK(IsNative());
+    if (IsNative()) {
+      // We are running the GenericJNI stub. The entrypoint may point
+      // to different entrypoints or to a JIT-compiled JNI stub.
+      DCHECK(class_linker->IsQuickGenericJniStub(existing_entry_point) ||
+             class_linker->IsQuickResolutionStub(existing_entry_point) ||
+             existing_entry_point == GetQuickInstrumentationEntryPoint() ||
+             (jit != nullptr && jit->GetCodeCache()->ContainsPc(existing_entry_point)));
       return nullptr;
     }
     // Only for unit tests.
@@ -702,13 +692,15 @@
   declaring_class_ = GcRoot<mirror::Class>(const_cast<ArtMethod*>(src)->GetDeclaringClass());
 
   // If the entry point of the method we are copying from is from JIT code, we just
-  // put the entry point of the new method to interpreter. We could set the entry point
-  // to the JIT code, but this would require taking the JIT code cache lock to notify
-  // it, which we do not want at this level.
+  // put the entry point of the new method to interpreter or GenericJNI. We could set
+  // the entry point to the JIT code, but this would require taking the JIT code cache
+  // lock to notify it, which we do not want at this level.
   Runtime* runtime = Runtime::Current();
   if (runtime->UseJitCompilation()) {
     if (runtime->GetJit()->GetCodeCache()->ContainsPc(GetEntryPointFromQuickCompiledCode())) {
-      SetEntryPointFromQuickCompiledCodePtrSize(GetQuickToInterpreterBridge(), image_pointer_size);
+      SetEntryPointFromQuickCompiledCodePtrSize(
+          src->IsNative() ? GetQuickGenericJniStub() : GetQuickToInterpreterBridge(),
+          image_pointer_size);
     }
   }
   // Clear the profiling info for the same reasons as the JIT code.
diff --git a/runtime/art_method.h b/runtime/art_method.h
index dca6f37..0a592e0 100644
--- a/runtime/art_method.h
+++ b/runtime/art_method.h
@@ -242,8 +242,9 @@
     return (GetAccessFlags() & kAccDefault) != 0;
   }
 
+  template <ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
   bool IsObsolete() {
-    return (GetAccessFlags() & kAccObsoleteMethod) != 0;
+    return (GetAccessFlags<kReadBarrierOption>() & kAccObsoleteMethod) != 0;
   }
 
   void SetIsObsolete() {
@@ -376,6 +377,7 @@
   ALWAYS_INLINE uint32_t GetDexMethodIndexUnchecked() {
     return dex_method_index_;
   }
+  template <ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
   ALWAYS_INLINE uint32_t GetDexMethodIndex() REQUIRES_SHARED(Locks::mutator_lock_);
 
   void SetDexMethodIndex(uint32_t new_idx) {
@@ -460,12 +462,11 @@
   }
 
   ProfilingInfo* GetProfilingInfo(PointerSize pointer_size) REQUIRES_SHARED(Locks::mutator_lock_) {
-    // Don't do a read barrier in the DCHECK, as GetProfilingInfo is called in places
-    // where the declaring class is treated as a weak reference (accessing it with
-    // a read barrier would either prevent unloading the class, or crash the runtime if
-    // the GC wants to unload it).
-    DCHECK(!IsNative<kWithoutReadBarrier>());
-    if (UNLIKELY(IsProxyMethod())) {
+    // Don't do a read barrier in the DCHECK() inside GetAccessFlags() called by IsNative(),
+    // as GetProfilingInfo is called in places where the declaring class is treated as a weak
+    // reference (accessing it with a read barrier would either prevent unloading the class,
+    // or crash the runtime if the GC wants to unload it).
+    if (UNLIKELY(IsNative<kWithoutReadBarrier>()) || UNLIKELY(IsProxyMethod())) {
       return nullptr;
     }
     return reinterpret_cast<ProfilingInfo*>(GetDataPtrSize(pointer_size));
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index ccf4319..e5bb786 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -4515,7 +4515,7 @@
 
 void ClassLinker::CreateProxyConstructor(Handle<mirror::Class> klass, ArtMethod* out) {
   // Create constructor for Proxy that must initialize the method.
-  CHECK_EQ(GetClassRoot(kJavaLangReflectProxy)->NumDirectMethods(), 23u);
+  CHECK_EQ(GetClassRoot(kJavaLangReflectProxy)->NumDirectMethods(), 21u);
 
   // Find the <init>(InvocationHandler)V method. The exact method offset varies depending
   // on which front-end compiler was used to build the libcore DEX files.
diff --git a/runtime/debugger.cc b/runtime/debugger.cc
index 1dcd935..13029fb 100644
--- a/runtime/debugger.cc
+++ b/runtime/debugger.cc
@@ -963,7 +963,11 @@
   }
   VariableSizedHandleScope hs(Thread::Current());
   std::vector<Handle<mirror::Object>> raw_instances;
-  Runtime::Current()->GetHeap()->GetInstances(hs, hs.NewHandle(c), max_count, raw_instances);
+  Runtime::Current()->GetHeap()->GetInstances(hs,
+                                              hs.NewHandle(c),
+                                              /* use_is_assignable_from */ false,
+                                              max_count,
+                                              raw_instances);
   for (size_t i = 0; i < raw_instances.size(); ++i) {
     instances->push_back(gRegistry->Add(raw_instances[i].Get()));
   }
diff --git a/runtime/entrypoints/entrypoint_utils.cc b/runtime/entrypoints/entrypoint_utils.cc
index 2bf4372..f3450da 100644
--- a/runtime/entrypoints/entrypoint_utils.cc
+++ b/runtime/entrypoints/entrypoint_utils.cc
@@ -245,7 +245,7 @@
 CallerAndOuterMethod GetCalleeSaveMethodCallerAndOuterMethod(Thread* self, CalleeSaveType type) {
   CallerAndOuterMethod result;
   ScopedAssertNoThreadSuspension ants(__FUNCTION__);
-  ArtMethod** sp = self->GetManagedStack()->GetTopQuickFrame();
+  ArtMethod** sp = self->GetManagedStack()->GetTopQuickFrameKnownNotTagged();
   auto outer_caller_and_pc = DoGetCalleeSaveMethodOuterCallerAndPc(sp, type);
   result.outer_method = outer_caller_and_pc.first;
   uintptr_t caller_pc = outer_caller_and_pc.second;
@@ -256,7 +256,7 @@
 
 ArtMethod* GetCalleeSaveOuterMethod(Thread* self, CalleeSaveType type) {
   ScopedAssertNoThreadSuspension ants(__FUNCTION__);
-  ArtMethod** sp = self->GetManagedStack()->GetTopQuickFrame();
+  ArtMethod** sp = self->GetManagedStack()->GetTopQuickFrameKnownNotTagged();
   return DoGetCalleeSaveMethodOuterCallerAndPc(sp, type).first;
 }
 
diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
index 2496aa0..0a76cdd 100644
--- a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
@@ -31,6 +31,7 @@
 #include "index_bss_mapping.h"
 #include "instrumentation.h"
 #include "interpreter/interpreter.h"
+#include "jit/jit.h"
 #include "linear_alloc.h"
 #include "method_handles.h"
 #include "method_reference.h"
@@ -2167,6 +2168,11 @@
   // Note: We cannot walk the stack properly until fixed up below.
   ArtMethod* called = *sp;
   DCHECK(called->IsNative()) << called->PrettyMethod(true);
+  Runtime* runtime = Runtime::Current();
+  jit::Jit* jit = runtime->GetJit();
+  if (jit != nullptr) {
+    jit->AddSamples(self, called, 1u, /*with_backedges*/ false);
+  }
   uint32_t shorty_len = 0;
   const char* shorty = called->GetShorty(&shorty_len);
   bool critical_native = called->IsCriticalNative();
@@ -2188,7 +2194,7 @@
   }
 
   // Fix up managed-stack things in Thread. After this we can walk the stack.
-  self->SetTopOfStack(sp);
+  self->SetTopOfStackTagged(sp);
 
   self->VerifyStack();
 
@@ -2308,6 +2314,7 @@
   // anything that requires a mutator lock before that would cause problems as GC may have the
   // exclusive mutator lock and may be moving objects, etc.
   ArtMethod** sp = self->GetManagedStack()->GetTopQuickFrame();
+  DCHECK(self->GetManagedStack()->GetTopQuickFrameTag());
   uint32_t* sp32 = reinterpret_cast<uint32_t*>(sp);
   ArtMethod* called = *sp;
   uint32_t cookie = *(sp32 - 1);
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 9f62666..f29ae92 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -1796,19 +1796,25 @@
   return GetBytesFreedEver() + GetBytesAllocated();
 }
 
+// Check whether the given object is an instance of the given class.
+static bool MatchesClass(mirror::Object* obj,
+                         Handle<mirror::Class> h_class,
+                         bool use_is_assignable_from) REQUIRES_SHARED(Locks::mutator_lock_) {
+  mirror::Class* instance_class = obj->GetClass();
+  CHECK(instance_class != nullptr);
+  ObjPtr<mirror::Class> klass = h_class.Get();
+  if (use_is_assignable_from) {
+    return klass != nullptr && klass->IsAssignableFrom(instance_class);
+  }
+  return instance_class == klass;
+}
+
 void Heap::CountInstances(const std::vector<Handle<mirror::Class>>& classes,
                           bool use_is_assignable_from,
                           uint64_t* counts) {
   auto instance_counter = [&](mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_) {
-    mirror::Class* instance_class = obj->GetClass();
-    CHECK(instance_class != nullptr);
     for (size_t i = 0; i < classes.size(); ++i) {
-      ObjPtr<mirror::Class> klass = classes[i].Get();
-      if (use_is_assignable_from) {
-        if (klass != nullptr && klass->IsAssignableFrom(instance_class)) {
-          ++counts[i];
-        }
-      } else if (instance_class == klass) {
+      if (MatchesClass(obj, classes[i], use_is_assignable_from)) {
         ++counts[i];
       }
     }
@@ -1818,11 +1824,12 @@
 
 void Heap::GetInstances(VariableSizedHandleScope& scope,
                         Handle<mirror::Class> h_class,
+                        bool use_is_assignable_from,
                         int32_t max_count,
                         std::vector<Handle<mirror::Object>>& instances) {
   DCHECK_GE(max_count, 0);
   auto instance_collector = [&](mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_) {
-    if (obj->GetClass() == h_class.Get()) {
+    if (MatchesClass(obj, h_class, use_is_assignable_from)) {
       if (max_count == 0 || instances.size() < static_cast<size_t>(max_count)) {
         instances.push_back(scope.NewHandle(obj));
       }
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index 4d7424c..ac0d82e 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -346,9 +346,10 @@
       REQUIRES(!Locks::heap_bitmap_lock_, !*gc_complete_lock_)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
-  // Implements JDWP RT_Instances.
+  // Implements VMDebug.getInstancesOfClasses and JDWP RT_Instances.
   void GetInstances(VariableSizedHandleScope& scope,
                     Handle<mirror::Class> c,
+                    bool use_is_assignable_from,
                     int32_t max_count,
                     std::vector<Handle<mirror::Object>>& instances)
       REQUIRES(!Locks::heap_bitmap_lock_, !*gc_complete_lock_)
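The new use_is_assignable_from flag selects between exact-class matching (the existing JDWP RT_Instances behaviour) and subtype matching, which the VMDebug.getInstancesOfClasses entry point at the end of this change passes through. A rough sketch of the two matching modes with stand-in types (Klass and Obj are not ART classes, and the real IsAssignableFrom also considers interfaces, which the sketch omits):

    #include <cassert>

    // Stand-ins for mirror::Class / mirror::Object, for illustration only.
    struct Klass {
      const Klass* super;
    };
    struct Obj {
      const Klass* klass;
    };

    // Simplified equivalent of Heap::MatchesClass().
    bool Matches(const Obj& obj, const Klass* wanted, bool use_is_assignable_from) {
      if (!use_is_assignable_from) {
        return obj.klass == wanted;  // exact class only (the old GetInstances behaviour)
      }
      for (const Klass* k = obj.klass; k != nullptr; k = k->super) {
        if (k == wanted) {
          return true;  // `wanted` is the class itself or a superclass
        }
      }
      return false;
    }

    int main() {
      Klass object{nullptr};   // java.lang.Object
      Klass string{&object};   // java.lang.String extends Object
      Obj s{&string};
      assert(!Matches(s, &object, /* use_is_assignable_from */ false));
      assert(Matches(s, &object, /* use_is_assignable_from */ true));
      return 0;
    }
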
diff --git a/runtime/jit/jit.cc b/runtime/jit/jit.cc
index 953e195..0d95bc6 100644
--- a/runtime/jit/jit.cc
+++ b/runtime/jit/jit.cc
@@ -643,7 +643,7 @@
     return;
   }
 
-  if (method->IsClassInitializer() || method->IsNative() || !method->IsCompilable()) {
+  if (method->IsClassInitializer() || !method->IsCompilable()) {
     // We do not want to compile such methods.
     return;
   }
@@ -659,7 +659,8 @@
     count *= priority_thread_weight_;
   }
   int32_t new_count = starting_count + count;   // int32 here to avoid wrap-around;
-  if (starting_count < warm_method_threshold_) {
+  // Note: Native methods have no "warm" state or profiling info.
+  if (LIKELY(!method->IsNative()) && starting_count < warm_method_threshold_) {
     if ((new_count >= warm_method_threshold_) &&
         (method->GetProfilingInfo(kRuntimePointerSize) == nullptr)) {
       bool success = ProfilingInfo::Create(self, method, /* retry_allocation */ false);
@@ -696,6 +697,7 @@
         // If the samples don't contain any back edge, we don't increment the hotness.
         return;
       }
+      DCHECK(!method->IsNative());  // No back edges reported for native methods.
       if ((new_count >= osr_method_threshold_) &&  !code_cache_->IsOsrCompiled(method)) {
         DCHECK(thread_pool_ != nullptr);
         thread_pool_->AddTask(self, new JitCompileTask(method, JitCompileTask::kCompileOsr));
diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc
index 3220513..a5c167e 100644
--- a/runtime/jit/jit_code_cache.cc
+++ b/runtime/jit/jit_code_cache.cc
@@ -55,6 +55,107 @@
 static constexpr size_t kCodeSizeLogThreshold = 50 * KB;
 static constexpr size_t kStackMapSizeLogThreshold = 50 * KB;
 
+class JitCodeCache::JniStubKey {
+ public:
+  explicit JniStubKey(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_)
+      : shorty_(method->GetShorty()),
+        is_static_(method->IsStatic()),
+        is_fast_native_(method->IsFastNative()),
+        is_critical_native_(method->IsCriticalNative()),
+        is_synchronized_(method->IsSynchronized()) {
+    DCHECK(!(is_fast_native_ && is_critical_native_));
+  }
+
+  bool operator<(const JniStubKey& rhs) const {
+    if (is_static_ != rhs.is_static_) {
+      return rhs.is_static_;
+    }
+    if (is_synchronized_ != rhs.is_synchronized_) {
+      return rhs.is_synchronized_;
+    }
+    if (is_fast_native_ != rhs.is_fast_native_) {
+      return rhs.is_fast_native_;
+    }
+    if (is_critical_native_ != rhs.is_critical_native_) {
+      return rhs.is_critical_native_;
+    }
+    return strcmp(shorty_, rhs.shorty_) < 0;
+  }
+
+  // Update the shorty to point to another method's shorty. Call this function when removing
+  // the method that references the old shorty from JniStubData and not removing the entire
+  // JniStubData; the old shorty may become a dangling pointer when that method is unloaded.
+  void UpdateShorty(ArtMethod* method) const REQUIRES_SHARED(Locks::mutator_lock_) {
+    const char* shorty = method->GetShorty();
+    DCHECK_STREQ(shorty_, shorty);
+    shorty_ = shorty;
+  }
+
+ private:
+  // The shorty points to a DexFile data and may need to change
+  // to point to the same shorty in a different DexFile.
+  mutable const char* shorty_;
+
+  const bool is_static_;
+  const bool is_fast_native_;
+  const bool is_critical_native_;
+  const bool is_synchronized_;
+};
+
+class JitCodeCache::JniStubData {
+ public:
+  JniStubData() : code_(nullptr), methods_() {}
+
+  void SetCode(const void* code) {
+    DCHECK(code != nullptr);
+    code_ = code;
+  }
+
+  const void* GetCode() const {
+    return code_;
+  }
+
+  bool IsCompiled() const {
+    return GetCode() != nullptr;
+  }
+
+  void AddMethod(ArtMethod* method) {
+    if (!ContainsElement(methods_, method)) {
+      methods_.push_back(method);
+    }
+  }
+
+  const std::vector<ArtMethod*>& GetMethods() const {
+    return methods_;
+  }
+
+  void RemoveMethodsIn(const LinearAlloc& alloc) {
+    auto kept_end = std::remove_if(
+        methods_.begin(),
+        methods_.end(),
+        [&alloc](ArtMethod* method) { return alloc.ContainsUnsafe(method); });
+    methods_.erase(kept_end, methods_.end());
+  }
+
+  bool RemoveMethod(ArtMethod* method) {
+    auto it = std::find(methods_.begin(), methods_.end(), method);
+    if (it != methods_.end()) {
+      methods_.erase(it);
+      return true;
+    } else {
+      return false;
+    }
+  }
+
+  void MoveObsoleteMethod(ArtMethod* old_method, ArtMethod* new_method) {
+    std::replace(methods_.begin(), methods_.end(), old_method, new_method);
+  }
+
+ private:
+  const void* code_;
+  std::vector<ArtMethod*> methods_;
+};
+
 JitCodeCache* JitCodeCache::Create(size_t initial_capacity,
                                    size_t max_capacity,
                                    bool generate_debug_info,
@@ -193,14 +294,36 @@
 
 bool JitCodeCache::ContainsMethod(ArtMethod* method) {
   MutexLock mu(Thread::Current(), lock_);
-  for (auto& it : method_code_map_) {
-    if (it.second == method) {
+  if (UNLIKELY(method->IsNative())) {
+    auto it = jni_stubs_map_.find(JniStubKey(method));
+    if (it != jni_stubs_map_.end() &&
+        it->second.IsCompiled() &&
+        ContainsElement(it->second.GetMethods(), method)) {
       return true;
     }
+  } else {
+    for (const auto& it : method_code_map_) {
+      if (it.second == method) {
+        return true;
+      }
+    }
   }
   return false;
 }
 
+const void* JitCodeCache::GetJniStubCode(ArtMethod* method) {
+  DCHECK(method->IsNative());
+  MutexLock mu(Thread::Current(), lock_);
+  auto it = jni_stubs_map_.find(JniStubKey(method));
+  if (it != jni_stubs_map_.end()) {
+    JniStubData& data = it->second;
+    if (data.IsCompiled() && ContainsElement(data.GetMethods(), method)) {
+      return data.GetCode();
+    }
+  }
+  return nullptr;
+}
+
 class ScopedCodeCacheWrite : ScopedTrace {
  public:
   explicit ScopedCodeCacheWrite(MemMap* code_map, bool only_for_tlb_shootdown = false)
@@ -426,7 +549,9 @@
   // Notify native debugger that we are about to remove the code.
   // It does nothing if we are not using native debugger.
   DeleteJITCodeEntryForAddress(reinterpret_cast<uintptr_t>(code_ptr));
-  FreeData(GetRootTable(code_ptr));
+  if (OatQuickMethodHeader::FromCodePointer(code_ptr)->IsOptimized()) {
+    FreeData(GetRootTable(code_ptr));
+  }  // else this is a JNI stub without any data.
   FreeCode(reinterpret_cast<uint8_t*>(allocation));
 }
 
@@ -463,6 +588,16 @@
     // lead to a deadlock.
     {
       ScopedCodeCacheWrite scc(code_map_.get());
+      for (auto it = jni_stubs_map_.begin(); it != jni_stubs_map_.end();) {
+        it->second.RemoveMethodsIn(alloc);
+        if (it->second.GetMethods().empty()) {
+          method_headers.insert(OatQuickMethodHeader::FromCodePointer(it->second.GetCode()));
+          it = jni_stubs_map_.erase(it);
+        } else {
+          it->first.UpdateShorty(it->second.GetMethods().front());
+          ++it;
+        }
+      }
       for (auto it = method_code_map_.begin(); it != method_code_map_.end();) {
         if (alloc.ContainsUnsafe(it->second)) {
           method_headers.insert(OatQuickMethodHeader::FromCodePointer(it->first));
@@ -572,7 +707,8 @@
                                           bool has_should_deoptimize_flag,
                                           const ArenaSet<ArtMethod*>&
                                               cha_single_implementation_list) {
-  DCHECK(stack_map != nullptr);
+  DCHECK_NE(stack_map != nullptr, method->IsNative());
+  DCHECK(!method->IsNative() || !osr);
   size_t alignment = GetInstructionSetAlignment(kRuntimeISA);
   // Ensure the header ends up at expected instruction alignment.
   size_t header_size = RoundUp(sizeof(OatQuickMethodHeader), alignment);
@@ -596,8 +732,8 @@
       std::copy(code, code + code_size, code_ptr);
       method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
       new (method_header) OatQuickMethodHeader(
-          code_ptr - stack_map,
-          code_ptr - method_info,
+          (stack_map != nullptr) ? code_ptr - stack_map : 0u,
+          (method_info != nullptr) ? code_ptr - method_info : 0u,
           frame_size_in_bytes,
           core_spill_mask,
           fp_spill_mask,
@@ -652,24 +788,40 @@
     // possible that the compiled code is considered invalidated by some class linking,
     // but below we still make the compiled code valid for the method.
     MutexLock mu(self, lock_);
-    // Fill the root table before updating the entry point.
-    DCHECK_EQ(FromStackMapToRoots(stack_map), roots_data);
-    DCHECK_LE(roots_data, stack_map);
-    FillRootTable(roots_data, roots);
-    {
-      // Flush data cache, as compiled code references literals in it.
-      // We also need a TLB shootdown to act as memory barrier across cores.
-      ScopedCodeCacheWrite ccw(code_map_.get(), /* only_for_tlb_shootdown */ true);
-      FlushDataCache(reinterpret_cast<char*>(roots_data),
-                     reinterpret_cast<char*>(roots_data + data_size));
-    }
-    method_code_map_.Put(code_ptr, method);
-    if (osr) {
-      number_of_osr_compilations_++;
-      osr_code_map_.Put(method, code_ptr);
+    if (UNLIKELY(method->IsNative())) {
+      DCHECK(stack_map == nullptr);
+      DCHECK(roots_data == nullptr);
+      auto it = jni_stubs_map_.find(JniStubKey(method));
+      DCHECK(it != jni_stubs_map_.end())
+          << "Entry inserted in NotifyCompilationOf() should be alive.";
+      JniStubData* data = &it->second;
+      DCHECK(ContainsElement(data->GetMethods(), method))
+          << "Entry inserted in NotifyCompilationOf() should contain this method.";
+      data->SetCode(code_ptr);
+      instrumentation::Instrumentation* instrum = Runtime::Current()->GetInstrumentation();
+      for (ArtMethod* m : data->GetMethods()) {
+        instrum->UpdateMethodsCode(m, method_header->GetEntryPoint());
+      }
     } else {
-      Runtime::Current()->GetInstrumentation()->UpdateMethodsCode(
-          method, method_header->GetEntryPoint());
+      // Fill the root table before updating the entry point.
+      DCHECK_EQ(FromStackMapToRoots(stack_map), roots_data);
+      DCHECK_LE(roots_data, stack_map);
+      FillRootTable(roots_data, roots);
+      {
+        // Flush data cache, as compiled code references literals in it.
+        // We also need a TLB shootdown to act as memory barrier across cores.
+        ScopedCodeCacheWrite ccw(code_map_.get(), /* only_for_tlb_shootdown */ true);
+        FlushDataCache(reinterpret_cast<char*>(roots_data),
+                       reinterpret_cast<char*>(roots_data + data_size));
+      }
+      method_code_map_.Put(code_ptr, method);
+      if (osr) {
+        number_of_osr_compilations_++;
+        osr_code_map_.Put(method, code_ptr);
+      } else {
+        Runtime::Current()->GetInstrumentation()->UpdateMethodsCode(
+            method, method_header->GetEntryPoint());
+      }
     }
     if (collection_in_progress_) {
       // We need to update the live bitmap if there is a GC to ensure it sees this new
@@ -703,45 +855,18 @@
 }
 
 bool JitCodeCache::RemoveMethod(ArtMethod* method, bool release_memory) {
+  // This function is used only for testing and only with non-native methods.
+  CHECK(!method->IsNative());
+
   MutexLock mu(Thread::Current(), lock_);
-  if (method->IsNative()) {
-    return false;
-  }
 
-  bool in_cache = false;
-  {
-    ScopedCodeCacheWrite ccw(code_map_.get());
-    for (auto code_iter = method_code_map_.begin(); code_iter != method_code_map_.end();) {
-      if (code_iter->second == method) {
-        if (release_memory) {
-          FreeCode(code_iter->first);
-        }
-        code_iter = method_code_map_.erase(code_iter);
-        in_cache = true;
-        continue;
-      }
-      ++code_iter;
-    }
-  }
-
-  bool osr = false;
-  auto code_map = osr_code_map_.find(method);
-  if (code_map != osr_code_map_.end()) {
-    osr_code_map_.erase(code_map);
-    osr = true;
-  }
+  bool osr = osr_code_map_.find(method) != osr_code_map_.end();
+  bool in_cache = RemoveMethodLocked(method, release_memory);
 
   if (!in_cache) {
     return false;
   }
 
-  ProfilingInfo* info = method->GetProfilingInfo(kRuntimePointerSize);
-  if (info != nullptr) {
-    auto profile = std::find(profiling_infos_.begin(), profiling_infos_.end(), info);
-    DCHECK(profile != profiling_infos_.end());
-    profiling_infos_.erase(profile);
-  }
-  method->SetProfilingInfo(nullptr);
   method->ClearCounter();
   Runtime::Current()->GetInstrumentation()->UpdateMethodsCode(
       method, GetQuickToInterpreterBridge());
@@ -753,34 +878,58 @@
   return true;
 }
 
+bool JitCodeCache::RemoveMethodLocked(ArtMethod* method, bool release_memory) {
+  if (LIKELY(!method->IsNative())) {
+    ProfilingInfo* info = method->GetProfilingInfo(kRuntimePointerSize);
+    if (info != nullptr) {
+      RemoveElement(profiling_infos_, info);
+    }
+    method->SetProfilingInfo(nullptr);
+  }
+
+  bool in_cache = false;
+  ScopedCodeCacheWrite ccw(code_map_.get());
+  if (UNLIKELY(method->IsNative())) {
+    auto it = jni_stubs_map_.find(JniStubKey(method));
+    if (it != jni_stubs_map_.end() && it->second.RemoveMethod(method)) {
+      in_cache = true;
+      if (it->second.GetMethods().empty()) {
+        if (release_memory) {
+          FreeCode(it->second.GetCode());
+        }
+        jni_stubs_map_.erase(it);
+      } else {
+        it->first.UpdateShorty(it->second.GetMethods().front());
+      }
+    }
+  } else {
+    for (auto it = method_code_map_.begin(); it != method_code_map_.end();) {
+      if (it->second == method) {
+        in_cache = true;
+        if (release_memory) {
+          FreeCode(it->first);
+        }
+        it = method_code_map_.erase(it);
+      } else {
+        ++it;
+      }
+    }
+
+    auto osr_it = osr_code_map_.find(method);
+    if (osr_it != osr_code_map_.end()) {
+      osr_code_map_.erase(osr_it);
+    }
+  }
+
+  return in_cache;
+}
+
 // This notifies the code cache that the given method has been redefined and that it should remove
 // any cached information it has on the method. All threads must be suspended before calling this
 // method. The compiled code for the method (if there is any) must not be in any threads call stack.
 void JitCodeCache::NotifyMethodRedefined(ArtMethod* method) {
   MutexLock mu(Thread::Current(), lock_);
-  if (method->IsNative()) {
-    return;
-  }
-  ProfilingInfo* info = method->GetProfilingInfo(kRuntimePointerSize);
-  if (info != nullptr) {
-    auto profile = std::find(profiling_infos_.begin(), profiling_infos_.end(), info);
-    DCHECK(profile != profiling_infos_.end());
-    profiling_infos_.erase(profile);
-  }
-  method->SetProfilingInfo(nullptr);
-  ScopedCodeCacheWrite ccw(code_map_.get());
-  for (auto code_iter = method_code_map_.begin(); code_iter != method_code_map_.end();) {
-    if (code_iter->second == method) {
-      FreeCode(code_iter->first);
-      code_iter = method_code_map_.erase(code_iter);
-      continue;
-    }
-    ++code_iter;
-  }
-  auto code_map = osr_code_map_.find(method);
-  if (code_map != osr_code_map_.end()) {
-    osr_code_map_.erase(code_map);
-  }
+  RemoveMethodLocked(method, /* release_memory */ true);
 }
 
 // This invalidates old_method. Once this function returns one can no longer use old_method to
@@ -790,11 +939,15 @@
 // shouldn't be used since it is no longer logically in the jit code cache.
 // TODO We should add DCHECKS that validate that the JIT is paused when this method is entered.
 void JitCodeCache::MoveObsoleteMethod(ArtMethod* old_method, ArtMethod* new_method) {
-  // Native methods have no profiling info and need no special handling from the JIT code cache.
+  MutexLock mu(Thread::Current(), lock_);
   if (old_method->IsNative()) {
+    // Update methods in jni_stubs_map_.
+    for (auto& entry : jni_stubs_map_) {
+      JniStubData& data = entry.second;
+      data.MoveObsoleteMethod(old_method, new_method);
+    }
     return;
   }
-  MutexLock mu(Thread::Current(), lock_);
   // Update ProfilingInfo to the new one and remove it from the old_method.
   if (old_method->GetProfilingInfo(kRuntimePointerSize) != nullptr) {
     DCHECK_EQ(old_method->GetProfilingInfo(kRuntimePointerSize)->GetMethod(), old_method);
@@ -936,7 +1089,7 @@
         // its stack frame, it is not the method owning return_pc_. We just pass null to
         // LookupMethodHeader: the method is only checked against in debug builds.
         OatQuickMethodHeader* method_header =
-            code_cache_->LookupMethodHeader(frame.return_pc_, nullptr);
+            code_cache_->LookupMethodHeader(frame.return_pc_, /* method */ nullptr);
         if (method_header != nullptr) {
           const void* code = method_header->GetCode();
           CHECK(code_cache_->GetLiveBitmap()->Test(FromCodeToAllocation(code)));
@@ -1089,7 +1242,7 @@
           const void* entry_point = info->GetMethod()->GetEntryPointFromQuickCompiledCode();
           if (ContainsPc(entry_point)) {
             info->SetSavedEntryPoint(entry_point);
-            // Don't call Instrumentation::UpdateMethods, as it can check the declaring
+            // Don't call Instrumentation::UpdateMethodsCode(), as it can check the declaring
             // class of the method. We may be concurrently running a GC which makes accessing
             // the class unsafe. We know it is OK to bypass the instrumentation as we've just
             // checked that the current entry point is JIT compiled code.
@@ -1098,6 +1251,25 @@
         }
 
         DCHECK(CheckLiveCompiledCodeHasProfilingInfo());
+
+        // Change entry points of native methods back to the GenericJNI entrypoint.
+        for (const auto& entry : jni_stubs_map_) {
+          const JniStubData& data = entry.second;
+          if (!data.IsCompiled()) {
+            continue;
+          }
+          // Make sure a single invocation of the GenericJNI trampoline tries to recompile.
+          uint16_t new_counter = Runtime::Current()->GetJit()->HotMethodThreshold() - 1u;
+          const OatQuickMethodHeader* method_header =
+              OatQuickMethodHeader::FromCodePointer(data.GetCode());
+          for (ArtMethod* method : data.GetMethods()) {
+            if (method->GetEntryPointFromQuickCompiledCode() == method_header->GetEntryPoint()) {
+              // Don't call Instrumentation::UpdateMethodsCode(), same as for normal methods above.
+              method->SetCounter(new_counter);
+              method->SetEntryPointFromQuickCompiledCode(GetQuickGenericJniStub());
+            }
+          }
+        }
       }
       live_bitmap_.reset(nullptr);
       NotifyCollectionDone(self);
@@ -1113,13 +1285,22 @@
     MutexLock mu(self, lock_);
     ScopedCodeCacheWrite scc(code_map_.get());
     // Iterate over all compiled code and remove entries that are not marked.
+    for (auto it = jni_stubs_map_.begin(); it != jni_stubs_map_.end();) {
+      JniStubData* data = &it->second;
+      if (!data->IsCompiled() || GetLiveBitmap()->Test(FromCodeToAllocation(data->GetCode()))) {
+        ++it;
+      } else {
+        method_headers.insert(OatQuickMethodHeader::FromCodePointer(data->GetCode()));
+        it = jni_stubs_map_.erase(it);
+      }
+    }
     for (auto it = method_code_map_.begin(); it != method_code_map_.end();) {
       const void* code_ptr = it->first;
       uintptr_t allocation = FromCodeToAllocation(code_ptr);
       if (GetLiveBitmap()->Test(allocation)) {
         ++it;
       } else {
-        method_headers.insert(OatQuickMethodHeader::FromCodePointer(it->first));
+        method_headers.insert(OatQuickMethodHeader::FromCodePointer(code_ptr));
         it = method_code_map_.erase(it);
       }
     }
@@ -1158,6 +1339,17 @@
     // an entry point is either:
     // - an osr compiled code, that will be removed if not in a thread call stack.
     // - discarded compiled code, that will be removed if not in a thread call stack.
+    for (const auto& entry : jni_stubs_map_) {
+      const JniStubData& data = entry.second;
+      const void* code_ptr = data.GetCode();
+      const OatQuickMethodHeader* method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
+      for (ArtMethod* method : data.GetMethods()) {
+        if (method_header->GetEntryPoint() == method->GetEntryPointFromQuickCompiledCode()) {
+          GetLiveBitmap()->AtomicTestAndSet(FromCodeToAllocation(code_ptr));
+          break;
+        }
+      }
+    }
     for (const auto& it : method_code_map_) {
       ArtMethod* method = it.second;
       const void* code_ptr = it.first;
@@ -1237,19 +1429,51 @@
     return nullptr;
   }
 
-  MutexLock mu(Thread::Current(), lock_);
-  if (method_code_map_.empty()) {
-    return nullptr;
+  if (!kIsDebugBuild) {
+    // Called with null `method` only from MarkCodeClosure::Run() in debug build.
+    CHECK(method != nullptr);
   }
-  auto it = method_code_map_.lower_bound(reinterpret_cast<const void*>(pc));
-  --it;
 
-  const void* code_ptr = it->first;
-  OatQuickMethodHeader* method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
-  if (!method_header->Contains(pc)) {
-    return nullptr;
+  MutexLock mu(Thread::Current(), lock_);
+  OatQuickMethodHeader* method_header = nullptr;
+  ArtMethod* found_method = nullptr;  // Only for DCHECK(), not for JNI stubs.
+  if (method != nullptr && UNLIKELY(method->IsNative())) {
+    auto it = jni_stubs_map_.find(JniStubKey(method));
+    if (it == jni_stubs_map_.end() || !ContainsElement(it->second.GetMethods(), method)) {
+      return nullptr;
+    }
+    const void* code_ptr = it->second.GetCode();
+    method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
+    if (!method_header->Contains(pc)) {
+      return nullptr;
+    }
+  } else {
+    auto it = method_code_map_.lower_bound(reinterpret_cast<const void*>(pc));
+    if (it != method_code_map_.begin()) {
+      --it;
+      const void* code_ptr = it->first;
+      if (OatQuickMethodHeader::FromCodePointer(code_ptr)->Contains(pc)) {
+        method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
+        found_method = it->second;
+      }
+    }
+    if (method_header == nullptr && method == nullptr) {
+      // Scan all compiled JNI stubs as well. This slow search is used only
+      // for checks in debug build, for release builds the `method` is not null.
+      for (auto&& entry : jni_stubs_map_) {
+        const JniStubData& data = entry.second;
+        if (data.IsCompiled() &&
+            OatQuickMethodHeader::FromCodePointer(data.GetCode())->Contains(pc)) {
+          method_header = OatQuickMethodHeader::FromCodePointer(data.GetCode());
+        }
+      }
+    }
+    if (method_header == nullptr) {
+      return nullptr;
+    }
   }
-  if (kIsDebugBuild && method != nullptr) {
+
+  if (kIsDebugBuild && method != nullptr && !method->IsNative()) {
     // When we are walking the stack to redefine classes and creating obsolete methods it is
     // possible that we might have updated the method_code_map by making this method obsolete in a
     // previous frame. Therefore we should just check that the non-obsolete version of this method
@@ -1258,9 +1482,9 @@
     // occur when we are in the process of allocating and setting up obsolete methods. Otherwise
     // method and it->second should be identical. (See openjdkjvmti/ti_redefine.cc for more
     // information.)
-    DCHECK_EQ(it->second->GetNonObsoleteMethod(), method->GetNonObsoleteMethod())
+    DCHECK_EQ(found_method->GetNonObsoleteMethod(), method->GetNonObsoleteMethod())
         << ArtMethod::PrettyMethod(method->GetNonObsoleteMethod()) << " "
-        << ArtMethod::PrettyMethod(it->second->GetNonObsoleteMethod()) << " "
+        << ArtMethod::PrettyMethod(found_method->GetNonObsoleteMethod()) << " "
         << std::hex << pc;
   }
   return method_header;
@@ -1449,21 +1673,51 @@
     return false;
   }
 
-  ProfilingInfo* info = method->GetProfilingInfo(kRuntimePointerSize);
-  if (info == nullptr) {
-    VLOG(jit) << method->PrettyMethod() << " needs a ProfilingInfo to be compiled";
-    // Because the counter is not atomic, there are some rare cases where we may not hit the
-    // threshold for creating the ProfilingInfo. Reset the counter now to "correct" this.
-    ClearMethodCounter(method, /*was_warm*/ false);
-    return false;
-  }
+  if (UNLIKELY(method->IsNative())) {
+    JniStubKey key(method);
+    auto it = jni_stubs_map_.find(key);
+    bool new_compilation = false;
+    if (it == jni_stubs_map_.end()) {
+      // Create a new entry to mark the stub as being compiled.
+      it = jni_stubs_map_.Put(key, JniStubData{});
+      new_compilation = true;
+    }
+    JniStubData* data = &it->second;
+    data->AddMethod(method);
+    if (data->IsCompiled()) {
+      OatQuickMethodHeader* method_header = OatQuickMethodHeader::FromCodePointer(data->GetCode());
+      const void* entrypoint = method_header->GetEntryPoint();
+      // Update also entrypoints of other methods held by the JniStubData.
+      // We could simply update the entrypoint of `method` but if the last JIT GC has
+      // changed these entrypoints to GenericJNI in preparation for a full GC, we may
+      // as well change them back as this stub shall not be collected anyway and this
+      // can avoid a few expensive GenericJNI calls.
+      instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
+      for (ArtMethod* m : data->GetMethods()) {
+        instrumentation->UpdateMethodsCode(m, entrypoint);
+      }
+      if (collection_in_progress_) {
+        GetLiveBitmap()->AtomicTestAndSet(FromCodeToAllocation(data->GetCode()));
+      }
+    }
+    return new_compilation;
+  } else {
+    ProfilingInfo* info = method->GetProfilingInfo(kRuntimePointerSize);
+    if (info == nullptr) {
+      VLOG(jit) << method->PrettyMethod() << " needs a ProfilingInfo to be compiled";
+      // Because the counter is not atomic, there are some rare cases where we may not hit the
+      // threshold for creating the ProfilingInfo. Reset the counter now to "correct" this.
+      ClearMethodCounter(method, /*was_warm*/ false);
+      return false;
+    }
 
-  if (info->IsMethodBeingCompiled(osr)) {
-    return false;
-  }
+    if (info->IsMethodBeingCompiled(osr)) {
+      return false;
+    }
 
-  info->SetIsMethodBeingCompiled(true, osr);
-  return true;
+    info->SetIsMethodBeingCompiled(true, osr);
+    return true;
+  }
 }
 
 ProfilingInfo* JitCodeCache::NotifyCompilerUse(ArtMethod* method, Thread* self) {
@@ -1485,10 +1739,23 @@
   info->DecrementInlineUse();
 }
 
-void JitCodeCache::DoneCompiling(ArtMethod* method, Thread* self ATTRIBUTE_UNUSED, bool osr) {
-  ProfilingInfo* info = method->GetProfilingInfo(kRuntimePointerSize);
-  DCHECK(info->IsMethodBeingCompiled(osr));
-  info->SetIsMethodBeingCompiled(false, osr);
+void JitCodeCache::DoneCompiling(ArtMethod* method, Thread* self, bool osr) {
+  DCHECK_EQ(Thread::Current(), self);
+  MutexLock mu(self, lock_);
+  if (UNLIKELY(method->IsNative())) {
+    auto it = jni_stubs_map_.find(JniStubKey(method));
+    DCHECK(it != jni_stubs_map_.end());
+    JniStubData* data = &it->second;
+    DCHECK(ContainsElement(data->GetMethods(), method));
+    if (UNLIKELY(!data->IsCompiled())) {
+      // Failed to compile; the JNI compiler never fails, but the cache may be full.
+      jni_stubs_map_.erase(it);  // Remove the entry added in NotifyCompilationOf().
+    }  // else CommitCodeInternal() updated entrypoints of all methods in the JniStubData.
+  } else {
+    ProfilingInfo* info = method->GetProfilingInfo(kRuntimePointerSize);
+    DCHECK(info->IsMethodBeingCompiled(osr));
+    info->SetIsMethodBeingCompiled(false, osr);
+  }
 }
 
 size_t JitCodeCache::GetMemorySizeOfCodePointer(const void* ptr) {
@@ -1498,6 +1765,7 @@
 
 void JitCodeCache::InvalidateCompiledCodeFor(ArtMethod* method,
                                              const OatQuickMethodHeader* header) {
+  DCHECK(!method->IsNative());
   ProfilingInfo* profiling_info = method->GetProfilingInfo(kRuntimePointerSize);
   if ((profiling_info != nullptr) &&
       (profiling_info->GetSavedEntryPoint() == header->GetEntryPoint())) {
@@ -1553,6 +1821,7 @@
   os << "Current JIT code cache size: " << PrettySize(used_memory_for_code_) << "\n"
      << "Current JIT data cache size: " << PrettySize(used_memory_for_data_) << "\n"
      << "Current JIT capacity: " << PrettySize(current_capacity_) << "\n"
+     << "Current number of JIT JNI stub entries: " << jni_stubs_map_.size() << "\n"
      << "Current number of JIT code cache entries: " << method_code_map_.size() << "\n"
      << "Total number of JIT compilations: " << number_of_compilations_ << "\n"
      << "Total number of JIT compilations for on stack replacement: "
diff --git a/runtime/jit/jit_code_cache.h b/runtime/jit/jit_code_cache.h
index 46a4085..fc011dd 100644
--- a/runtime/jit/jit_code_cache.h
+++ b/runtime/jit/jit_code_cache.h
@@ -35,9 +35,23 @@
 class LinearAlloc;
 class InlineCache;
 class IsMarkedVisitor;
+class JitJniStubTestHelper;
 class OatQuickMethodHeader;
 struct ProfileMethodInfo;
 class ProfilingInfo;
+class Thread;
+
+namespace gc {
+namespace accounting {
+template<size_t kAlignment> class MemoryRangeBitmap;
+}  // namespace accounting
+}  // namespace gc
+
+namespace mirror {
+class Class;
+class Object;
+template<class T> class ObjectArray;
+}  // namespace mirror
 
 namespace gc {
 namespace accounting {
@@ -137,6 +151,9 @@
   // Return true if the code cache contains this method.
   bool ContainsMethod(ArtMethod* method) REQUIRES(!lock_);
 
+  // Return the code pointer for a JNI-compiled stub if the method is in the cache, null otherwise.
+  const void* GetJniStubCode(ArtMethod* method) REQUIRES(!lock_);
+
   // Allocate a region of data that contain `size` bytes, and potentially space
   // for storing `number_of_roots` roots. Returns null if there is no more room.
   // Return the number of bytes allocated.
@@ -160,11 +177,6 @@
     return live_bitmap_.get();
   }
 
-  // Return whether we should do a full collection given the current state of the cache.
-  bool ShouldDoFullCollection()
-      REQUIRES(lock_)
-      REQUIRES_SHARED(Locks::mutator_lock_);
-
   // Perform a collection on the code cache.
   void GarbageCollectCache(Thread* self)
       REQUIRES(!lock_)
@@ -296,6 +308,12 @@
       REQUIRES(!lock_)
       REQUIRES(!Locks::cha_lock_);
 
+  // Removes method from the cache. The caller must ensure that all threads
+  // are suspended and the method should not be in any thread's stack.
+  bool RemoveMethodLocked(ArtMethod* method, bool release_memory)
+      REQUIRES(lock_)
+      REQUIRES(Locks::mutator_lock_);
+
   // Free in the mspace allocations for `code_ptr`.
   void FreeCode(const void* code_ptr) REQUIRES(lock_);
 
@@ -315,6 +333,11 @@
   // Set the footprint limit of the code cache.
   void SetFootprintLimit(size_t new_footprint) REQUIRES(lock_);
 
+  // Return whether we should do a full collection given the current state of the cache.
+  bool ShouldDoFullCollection()
+      REQUIRES(lock_)
+      REQUIRES_SHARED(Locks::mutator_lock_);
+
   void DoCollection(Thread* self, bool collect_profiling_info)
       REQUIRES(!lock_)
       REQUIRES_SHARED(Locks::mutator_lock_);
@@ -341,6 +364,9 @@
       REQUIRES(!lock_)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
+  class JniStubKey;
+  class JniStubData;
+
   // Lock for guarding allocations, collections, and the method_code_map_.
   Mutex lock_;
   // Condition to wait on during collection.
@@ -357,6 +383,8 @@
   void* data_mspace_ GUARDED_BY(lock_);
   // Bitmap for collecting code and data.
   std::unique_ptr<CodeCacheBitmap> live_bitmap_;
+  // Holds compiled code associated with the shorty for a JNI stub.
+  SafeMap<JniStubKey, JniStubData> jni_stubs_map_ GUARDED_BY(lock_);
   // Holds compiled code associated to the ArtMethod.
   SafeMap<const void*, ArtMethod*> method_code_map_ GUARDED_BY(lock_);
   // Holds osr compiled code associated to the ArtMethod.
@@ -418,6 +446,7 @@
   // Condition to wait on for accessing inline caches.
   ConditionVariable inline_cache_cond_ GUARDED_BY(lock_);
 
+  friend class art::JitJniStubTestHelper;
   DISALLOW_IMPLICIT_CONSTRUCTORS(JitCodeCache);
 };
 
diff --git a/runtime/jit/profile_saver.cc b/runtime/jit/profile_saver.cc
index 01853de..acbc6e6 100644
--- a/runtime/jit/profile_saver.cc
+++ b/runtime/jit/profile_saver.cc
@@ -357,8 +357,8 @@
             sampled_methods->AddReference(method.GetDexFile(), method.GetDexMethodIndex());
           }
         } else {
-          CHECK_EQ(method.GetCounter(), 0u) << method.PrettyMethod()
-              << " access_flags=" << method.GetAccessFlags();
+          // We do not record native methods. Once we AOT-compile the app, all native
+          // methods shall have their thunks compiled.
         }
       }
     }
diff --git a/runtime/managed_stack-inl.h b/runtime/managed_stack-inl.h
index 689dd80..678be8e 100644
--- a/runtime/managed_stack-inl.h
+++ b/runtime/managed_stack-inl.h
@@ -24,7 +24,7 @@
 namespace art {
 
 inline ShadowFrame* ManagedStack::PushShadowFrame(ShadowFrame* new_top_frame) {
-  DCHECK(top_quick_frame_ == nullptr);
+  DCHECK(!HasTopQuickFrame());
   ShadowFrame* old_frame = top_shadow_frame_;
   top_shadow_frame_ = new_top_frame;
   new_top_frame->SetLink(old_frame);
@@ -32,7 +32,7 @@
 }
 
 inline ShadowFrame* ManagedStack::PopShadowFrame() {
-  DCHECK(top_quick_frame_ == nullptr);
+  DCHECK(!HasTopQuickFrame());
   CHECK(top_shadow_frame_ != nullptr);
   ShadowFrame* frame = top_shadow_frame_;
   top_shadow_frame_ = frame->GetLink();
diff --git a/runtime/managed_stack.h b/runtime/managed_stack.h
index 4f1984d..07078ec 100644
--- a/runtime/managed_stack.h
+++ b/runtime/managed_stack.h
@@ -24,6 +24,7 @@
 #include "base/logging.h"
 #include "base/macros.h"
 #include "base/mutex.h"
+#include "base/bit_utils.h"
 
 namespace art {
 
@@ -42,7 +43,9 @@
 class PACKED(4) ManagedStack {
  public:
   ManagedStack()
-      : top_quick_frame_(nullptr), link_(nullptr), top_shadow_frame_(nullptr) {}
+      : tagged_top_quick_frame_(TaggedTopQuickFrame::CreateNotTagged(nullptr)),
+        link_(nullptr),
+        top_shadow_frame_(nullptr) {}
 
   void PushManagedStackFragment(ManagedStack* fragment) {
     // Copy this top fragment into given fragment.
@@ -63,17 +66,36 @@
     return link_;
   }
 
+  ArtMethod** GetTopQuickFrameKnownNotTagged() const {
+    return tagged_top_quick_frame_.GetSpKnownNotTagged();
+  }
+
   ArtMethod** GetTopQuickFrame() const {
-    return top_quick_frame_;
+    return tagged_top_quick_frame_.GetSp();
+  }
+
+  bool GetTopQuickFrameTag() const {
+    return tagged_top_quick_frame_.GetTag();
+  }
+
+  bool HasTopQuickFrame() const {
+    return tagged_top_quick_frame_.GetTaggedSp() != 0u;
   }
 
   void SetTopQuickFrame(ArtMethod** top) {
     DCHECK(top_shadow_frame_ == nullptr);
-    top_quick_frame_ = top;
+    DCHECK_ALIGNED(top, 4u);
+    tagged_top_quick_frame_ = TaggedTopQuickFrame::CreateNotTagged(top);
   }
 
-  static size_t TopQuickFrameOffset() {
-    return OFFSETOF_MEMBER(ManagedStack, top_quick_frame_);
+  void SetTopQuickFrameTagged(ArtMethod** top) {
+    DCHECK(top_shadow_frame_ == nullptr);
+    DCHECK_ALIGNED(top, 4u);
+    tagged_top_quick_frame_ = TaggedTopQuickFrame::CreateTagged(top);
+  }
+
+  static size_t TaggedTopQuickFrameOffset() {
+    return OFFSETOF_MEMBER(ManagedStack, tagged_top_quick_frame_);
   }
 
   ALWAYS_INLINE ShadowFrame* PushShadowFrame(ShadowFrame* new_top_frame);
@@ -83,8 +105,12 @@
     return top_shadow_frame_;
   }
 
+  bool HasTopShadowFrame() const {
+    return GetTopShadowFrame() != nullptr;
+  }
+
   void SetTopShadowFrame(ShadowFrame* top) {
-    DCHECK(top_quick_frame_ == nullptr);
+    DCHECK_EQ(tagged_top_quick_frame_.GetTaggedSp(), 0u);
     top_shadow_frame_ = top;
   }
 
@@ -97,7 +123,47 @@
   bool ShadowFramesContain(StackReference<mirror::Object>* shadow_frame_entry) const;
 
  private:
-  ArtMethod** top_quick_frame_;
+  // Encodes the top quick frame (which must be at least 4-byte aligned)
+  // and a flag that marks the GenericJNI trampoline.
+  class TaggedTopQuickFrame {
+   public:
+    static TaggedTopQuickFrame CreateNotTagged(ArtMethod** sp) {
+      DCHECK_ALIGNED(sp, 4u);
+      return TaggedTopQuickFrame(reinterpret_cast<uintptr_t>(sp));
+    }
+
+    static TaggedTopQuickFrame CreateTagged(ArtMethod** sp) {
+      DCHECK_ALIGNED(sp, 4u);
+      return TaggedTopQuickFrame(reinterpret_cast<uintptr_t>(sp) | 1u);
+    }
+
+    // Get SP known to be not tagged and non-null.
+    ArtMethod** GetSpKnownNotTagged() const {
+      DCHECK(!GetTag());
+      DCHECK_NE(tagged_sp_, 0u);
+      return reinterpret_cast<ArtMethod**>(tagged_sp_);
+    }
+
+    ArtMethod** GetSp() const {
+      return reinterpret_cast<ArtMethod**>(tagged_sp_ & ~static_cast<uintptr_t>(1u));
+    }
+
+    bool GetTag() const {
+      return (tagged_sp_ & 1u) != 0u;
+    }
+
+    uintptr_t GetTaggedSp() const {
+      return tagged_sp_;
+    }
+
+   private:
+    explicit TaggedTopQuickFrame(uintptr_t tagged_sp) : tagged_sp_(tagged_sp) { }
+
+    uintptr_t tagged_sp_;
+  };
+  static_assert(sizeof(TaggedTopQuickFrame) == sizeof(uintptr_t), "TaggedTopQuickFrame size check");
+
+  TaggedTopQuickFrame tagged_top_quick_frame_;
   ManagedStack* link_;
   ShadowFrame* top_shadow_frame_;
 };
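The tagging above relies on every top quick frame SP being an ArtMethod** with at least 4-byte
alignment (see the DCHECK_ALIGNED checks), which leaves bit 0 free to mark a GenericJNI frame. A
minimal, self-contained round-trip check of the same encoding, using a plain uintptr_t instead of
the real class:

#include <cassert>
#include <cstdint>

int main() {
  alignas(4) static void* frame_slot = nullptr;  // Stand-in for an ArtMethod** quick frame.
  uintptr_t sp = reinterpret_cast<uintptr_t>(&frame_slot);

  uintptr_t tagged = sp | 1u;      // CreateTagged(): frame entered via the GenericJNI trampoline.
  uintptr_t not_tagged = sp;       // CreateNotTagged(): any other quick frame.

  assert((tagged & 1u) != 0u && (not_tagged & 1u) == 0u);      // GetTag().
  assert((tagged & ~static_cast<uintptr_t>(1u)) == sp);        // GetSp() strips the tag bit.
  assert(not_tagged != 0u);                                    // HasTopQuickFrame() analogue.
  return 0;
}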
diff --git a/runtime/native/dalvik_system_VMDebug.cc b/runtime/native/dalvik_system_VMDebug.cc
index 2663bea..88a78ab 100644
--- a/runtime/native/dalvik_system_VMDebug.cc
+++ b/runtime/native/dalvik_system_VMDebug.cc
@@ -319,6 +319,53 @@
   return soa.AddLocalReference<jlongArray>(long_counts);
 }
 
+static jobjectArray VMDebug_getInstancesOfClasses(JNIEnv* env,
+                                                  jclass,
+                                                  jobjectArray javaClasses,
+                                                  jboolean includeAssignable) {
+  ScopedObjectAccess soa(env);
+  StackHandleScope<2> hs(soa.Self());
+  Handle<mirror::ObjectArray<mirror::Class>> classes = hs.NewHandle(
+      soa.Decode<mirror::ObjectArray<mirror::Class>>(javaClasses));
+  if (classes == nullptr) {
+    return nullptr;
+  }
+
+  jclass object_array_class = env->FindClass("[Ljava/lang/Object;");
+  if (env->ExceptionCheck() == JNI_TRUE) {
+    return nullptr;
+  }
+  CHECK(object_array_class != nullptr);
+
+  size_t num_classes = classes->GetLength();
+  jobjectArray result = env->NewObjectArray(num_classes, object_array_class, nullptr);
+  if (env->ExceptionCheck() == JNI_TRUE) {
+    return nullptr;
+  }
+
+  gc::Heap* const heap = Runtime::Current()->GetHeap();
+  MutableHandle<mirror::Class> h_class(hs.NewHandle<mirror::Class>(nullptr));
+  for (size_t i = 0; i < num_classes; ++i) {
+    h_class.Assign(classes->Get(i));
+
+    VariableSizedHandleScope hs2(soa.Self());
+    std::vector<Handle<mirror::Object>> raw_instances;
+    heap->GetInstances(hs2, h_class, includeAssignable, /* max_count */ 0, raw_instances);
+    jobjectArray array = env->NewObjectArray(raw_instances.size(),
+                                             WellKnownClasses::java_lang_Object,
+                                             nullptr);
+    if (env->ExceptionCheck() == JNI_TRUE) {
+      return nullptr;
+    }
+
+    for (size_t j = 0; j < raw_instances.size(); ++j) {
+      env->SetObjectArrayElement(array, j, raw_instances[j].ToJObject());
+    }
+    env->SetObjectArrayElement(result, i, array);
+  }
+  return result;
+}
+
 // We export the VM internal per-heap-space size/alloc/free metrics
 // for the zygote space, alloc space (application heap), and the large
 // object space for dumpsys meminfo. The other memory region data such
@@ -534,6 +581,7 @@
   NATIVE_METHOD(VMDebug, dumpReferenceTables, "()V"),
   NATIVE_METHOD(VMDebug, getAllocCount, "(I)I"),
   NATIVE_METHOD(VMDebug, getHeapSpaceStats, "([J)V"),
+  NATIVE_METHOD(VMDebug, getInstancesOfClasses, "([Ljava/lang/Class;Z)[[Ljava/lang/Object;"),
   NATIVE_METHOD(VMDebug, getInstructionCount, "([I)V"),
   FAST_NATIVE_METHOD(VMDebug, getLoadedClassCount, "()I"),
   NATIVE_METHOD(VMDebug, getVmFeatureList, "()[Ljava/lang/String;"),
diff --git a/runtime/stack.cc b/runtime/stack.cc
index ab9fb0d..5ad1f7c 100644
--- a/runtime/stack.cc
+++ b/runtime/stack.cc
@@ -735,12 +735,19 @@
     return runtime->GetCalleeSaveMethodFrameInfo(CalleeSaveType::kSaveRefsAndArgs);
   }
 
-  // The only remaining case is if the method is native and uses the generic JNI stub.
+  // The only remaining case is if the method is native and uses the generic JNI stub,
+  // called either directly or through some (resolution, instrumentation) trampoline.
   DCHECK(method->IsNative());
-  ClassLinker* class_linker = runtime->GetClassLinker();
-  const void* entry_point = runtime->GetInstrumentation()->GetQuickCodeFor(method,
-                                                                           kRuntimePointerSize);
-  DCHECK(class_linker->IsQuickGenericJniStub(entry_point)) << method->PrettyMethod();
+  if (kIsDebugBuild) {
+    ClassLinker* class_linker = runtime->GetClassLinker();
+    const void* entry_point = runtime->GetInstrumentation()->GetQuickCodeFor(method,
+                                                                             kRuntimePointerSize);
+    CHECK(class_linker->IsQuickGenericJniStub(entry_point) ||
+          // The current entrypoint (after filtering out trampolines) may have changed
+          // from GenericJNI to a JIT-compiled stub since we entered this frame.
+          (runtime->GetJit() != nullptr &&
+           runtime->GetJit()->GetCodeCache()->ContainsPc(entry_point))) << method->PrettyMethod();
+  }
   // Generic JNI frame.
   uint32_t handle_refs = GetNumberOfReferenceArgsWithoutReceiver(method) + 1;
   size_t scope_size = HandleScope::SizeOf(handle_refs);
@@ -776,8 +783,48 @@
       // Can't be both a shadow and a quick fragment.
       DCHECK(current_fragment->GetTopShadowFrame() == nullptr);
       ArtMethod* method = *cur_quick_frame_;
+      DCHECK(method != nullptr);
+      bool header_retrieved = false;
+      if (method->IsNative()) {
+        // We do not have a PC for the first frame, so we cannot simply use
+        // ArtMethod::GetOatQuickMethodHeader(), as it cannot distinguish between a
+        // GenericJNI frame and a JIT-compiled JNI stub; the entrypoint may have
+        // changed since the frame was entered. The top quick frame tag indicates
+        // GenericJNI here; otherwise it's either an AOT-compiled or a JIT-compiled JNI stub.
+        if (UNLIKELY(current_fragment->GetTopQuickFrameTag())) {
+          // The generic JNI does not have any method header.
+          cur_oat_quick_method_header_ = nullptr;
+        } else {
+          const void* existing_entry_point = method->GetEntryPointFromQuickCompiledCode();
+          CHECK(existing_entry_point != nullptr);
+          Runtime* runtime = Runtime::Current();
+          ClassLinker* class_linker = runtime->GetClassLinker();
+          // Check whether we can quickly get the header from the current entrypoint.
+          if (!class_linker->IsQuickGenericJniStub(existing_entry_point) &&
+              !class_linker->IsQuickResolutionStub(existing_entry_point) &&
+              existing_entry_point != GetQuickInstrumentationEntryPoint()) {
+            cur_oat_quick_method_header_ =
+                OatQuickMethodHeader::FromEntryPoint(existing_entry_point);
+          } else {
+            const void* code = method->GetOatMethodQuickCode(class_linker->GetImagePointerSize());
+            if (code != nullptr) {
+              cur_oat_quick_method_header_ = OatQuickMethodHeader::FromEntryPoint(code);
+            } else {
+              // This must be a JITted JNI stub frame.
+              CHECK(runtime->GetJit() != nullptr);
+              code = runtime->GetJit()->GetCodeCache()->GetJniStubCode(method);
+              CHECK(code != nullptr) << method->PrettyMethod();
+              cur_oat_quick_method_header_ = OatQuickMethodHeader::FromCodePointer(code);
+            }
+          }
+        }
+        header_retrieved = true;
+      }
       while (method != nullptr) {
-        cur_oat_quick_method_header_ = method->GetOatQuickMethodHeader(cur_quick_frame_pc_);
+        if (!header_retrieved) {
+          cur_oat_quick_method_header_ = method->GetOatQuickMethodHeader(cur_quick_frame_pc_);
+        }
+        header_retrieved = false;  // Force header retrieval in next iteration.
         SanityCheckFrame();
 
         if ((walk_kind_ == StackWalkKind::kIncludeInlinedFrames)
diff --git a/runtime/stack.h b/runtime/stack.h
index bd6204f..a16930b 100644
--- a/runtime/stack.h
+++ b/runtime/stack.h
@@ -140,8 +140,7 @@
   };
 
   template <CountTransitions kCount = CountTransitions::kYes>
-  void WalkStack(bool include_transitions = false)
-      REQUIRES_SHARED(Locks::mutator_lock_);
+  void WalkStack(bool include_transitions = false) REQUIRES_SHARED(Locks::mutator_lock_);
 
   Thread* GetThread() const {
     return thread_;
diff --git a/runtime/thread.cc b/runtime/thread.cc
index 712eabc..bec1c90 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -1884,9 +1884,7 @@
   }
 
   // Threads with no managed stack frames should be shown.
-  const ManagedStack* managed_stack = thread->GetManagedStack();
-  if (managed_stack == nullptr || (managed_stack->GetTopQuickFrame() == nullptr &&
-      managed_stack->GetTopShadowFrame() == nullptr)) {
+  if (!thread->HasManagedStack()) {
     return true;
   }
 
diff --git a/runtime/thread.h b/runtime/thread.h
index 39be66d..0803975 100644
--- a/runtime/thread.h
+++ b/runtime/thread.h
@@ -474,13 +474,16 @@
     tlsPtr_.managed_stack.SetTopQuickFrame(top_method);
   }
 
+  void SetTopOfStackTagged(ArtMethod** top_method) {
+    tlsPtr_.managed_stack.SetTopQuickFrameTagged(top_method);
+  }
+
   void SetTopOfShadowStack(ShadowFrame* top) {
     tlsPtr_.managed_stack.SetTopShadowFrame(top);
   }
 
   bool HasManagedStack() const {
-    return (tlsPtr_.managed_stack.GetTopQuickFrame() != nullptr) ||
-        (tlsPtr_.managed_stack.GetTopShadowFrame() != nullptr);
+    return tlsPtr_.managed_stack.HasTopQuickFrame() || tlsPtr_.managed_stack.HasTopShadowFrame();
   }
 
   // If 'msg' is null, no detail message is set.
@@ -833,7 +836,7 @@
   static ThreadOffset<pointer_size> TopOfManagedStackOffset() {
     return ThreadOffsetFromTlsPtr<pointer_size>(
         OFFSETOF_MEMBER(tls_ptr_sized_values, managed_stack) +
-        ManagedStack::TopQuickFrameOffset());
+        ManagedStack::TaggedTopQuickFrameOffset());
   }
 
   const ManagedStack* GetManagedStack() const {
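Where SetTopOfStackTagged() is actually called is not part of this excerpt; judging from the
stack.cc comment above ("The top quick frame tag indicates GenericJNI here"), the GenericJNI entry
path presumably uses the tagged setter while compiled code keeps the plain one. A hedged sketch
with hypothetical helper names:

#include "thread.h"  // ART's Thread, as declared in the hunk above.

namespace art {

// Hypothetical helpers for illustration only; the real call sites live in the trampoline and
// entrypoint code, which is not shown here.
inline void SketchEnterGenericJniFrame(Thread* self, ArtMethod** sp) {
  // GenericJNI has no OatQuickMethodHeader, so mark the frame with the tag bit.
  self->SetTopOfStackTagged(sp);
}

inline void SketchEnterCompiledQuickFrame(Thread* self, ArtMethod** sp) {
  // AOT- or JIT-compiled code (including compiled JNI stubs) stays untagged, so the stack
  // walker can look up the OatQuickMethodHeader from the entrypoint or the JIT code cache.
  self->SetTopOfStack(sp);
}

}  // namespace art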
diff --git a/test/099-vmdebug/expected.txt b/test/099-vmdebug/expected.txt
index b8d72f6..f7801de 100644
--- a/test/099-vmdebug/expected.txt
+++ b/test/099-vmdebug/expected.txt
@@ -23,3 +23,9 @@
 Instances of ClassA assignable 3
 Array counts [2, 1, 0]
 Array counts assignable [3, 1, 0]
+ClassD got 3, combined mask: 13
+ClassE got 2, combined mask: 18
+null got 0
+ClassD assignable got 5, combined mask: 31
+ClassE assignable got 2, combined mask: 18
+null assignable got 0
diff --git a/test/099-vmdebug/info.txt b/test/099-vmdebug/info.txt
index 7f88086..873429e 100644
--- a/test/099-vmdebug/info.txt
+++ b/test/099-vmdebug/info.txt
@@ -1 +1 @@
-Tests of private dalvik.system.VMDebug support for method tracing.
+Tests of dalvik.system.VMDebug APIs.
diff --git a/test/099-vmdebug/src/Main.java b/test/099-vmdebug/src/Main.java
index 90ad315..e0d829a 100644
--- a/test/099-vmdebug/src/Main.java
+++ b/test/099-vmdebug/src/Main.java
@@ -33,6 +33,7 @@
         }
         testMethodTracing();
         testCountInstances();
+        testGetInstances();
         testRuntimeStat();
         testRuntimeStats();
     }
@@ -249,6 +250,59 @@
         System.out.println("Array counts assignable " + Arrays.toString(counts));
     }
 
+    static class ClassD {
+        public int mask;
+
+        public ClassD(int mask) {
+            this.mask = mask;
+        }
+    }
+
+    static class ClassE extends ClassD {
+        public ClassE(int mask) {
+            super(mask);
+        }
+    }
+
+    private static void testGetInstances() throws Exception {
+        ArrayList<Object> l = new ArrayList<Object>();
+        l.add(new ClassD(0x01));
+        l.add(new ClassE(0x02));
+        l.add(new ClassD(0x04));
+        l.add(new ClassD(0x08));
+        l.add(new ClassE(0x10));
+        Runtime.getRuntime().gc();
+        Class<?>[] classes = new Class<?>[] {ClassD.class, ClassE.class, null};
+        Object[][] instances = VMDebug.getInstancesOfClasses(classes, false);
+
+        int mask = 0;
+        for (Object instance : instances[0]) {
+            mask |= ((ClassD)instance).mask;
+        }
+        System.out.println("ClassD got " + instances[0].length + ", combined mask: " + mask);
+
+        mask = 0;
+        for (Object instance : instances[1]) {
+            mask |= ((ClassD)instance).mask;
+        }
+        System.out.println("ClassE got " + instances[1].length + ", combined mask: " + mask);
+        System.out.println("null got " + instances[2].length);
+
+        instances = VMDebug.getInstancesOfClasses(classes, true);
+        mask = 0;
+        for (Object instance : instances[0]) {
+            mask |= ((ClassD)instance).mask;
+        }
+        System.out.println("ClassD assignable got " + instances[0].length + ", combined mask: " + mask);
+
+        mask = 0;
+        for (Object instance : instances[1]) {
+            mask |= ((ClassD)instance).mask;
+        }
+        System.out.println("ClassE assignable got " + instances[1].length + ", combined mask: " + mask);
+        System.out.println("null assignable got " + instances[2].length);
+    }
+
     private static class VMDebug {
         private static final Method startMethodTracingMethod;
         private static final Method stopMethodTracingMethod;
@@ -257,6 +311,7 @@
         private static final Method getRuntimeStatsMethod;
         private static final Method countInstancesOfClassMethod;
         private static final Method countInstancesOfClassesMethod;
+        private static final Method getInstancesOfClassesMethod;
         static {
             try {
                 Class<?> c = Class.forName("dalvik.system.VMDebug");
@@ -270,6 +325,8 @@
                         Class.class, Boolean.TYPE);
                 countInstancesOfClassesMethod = c.getDeclaredMethod("countInstancesOfClasses",
                         Class[].class, Boolean.TYPE);
+                getInstancesOfClassesMethod = c.getDeclaredMethod("getInstancesOfClasses",
+                        Class[].class, Boolean.TYPE);
             } catch (Exception e) {
                 throw new RuntimeException(e);
             }
@@ -300,5 +357,9 @@
             return (long[]) countInstancesOfClassesMethod.invoke(
                     null, new Object[]{classes, assignable});
         }
+        public static Object[][] getInstancesOfClasses(Class<?>[] classes, boolean assignable) throws Exception {
+            return (Object[][]) getInstancesOfClassesMethod.invoke(
+                    null, new Object[]{classes, assignable});
+        }
     }
 }
diff --git a/test/655-jit-clinit/src/Main.java b/test/655-jit-clinit/src/Main.java
index 44b3154..2fb8f2a 100644
--- a/test/655-jit-clinit/src/Main.java
+++ b/test/655-jit-clinit/src/Main.java
@@ -23,7 +23,7 @@
     Foo.hotMethod();
   }
 
-  public native static boolean isJitCompiled(Class<?> cls, String methodName);
+  public native static boolean hasJitCompiledEntrypoint(Class<?> cls, String methodName);
   private native static boolean hasJit();
 }
 
@@ -36,7 +36,7 @@
 
   static {
     array = new Object[10000];
-    while (!Main.isJitCompiled(Foo.class, "hotMethod")) {
+    while (!Main.hasJitCompiledEntrypoint(Foo.class, "hotMethod")) {
       Foo.hotMethod();
       try {
         // Sleep to give a chance for the JIT to compile `hotMethod`.
diff --git a/test/667-jit-jni-stub/expected.txt b/test/667-jit-jni-stub/expected.txt
new file mode 100644
index 0000000..6a5618e
--- /dev/null
+++ b/test/667-jit-jni-stub/expected.txt
@@ -0,0 +1 @@
+JNI_OnLoad called
diff --git a/test/667-jit-jni-stub/info.txt b/test/667-jit-jni-stub/info.txt
new file mode 100644
index 0000000..6f25c44
--- /dev/null
+++ b/test/667-jit-jni-stub/info.txt
@@ -0,0 +1 @@
+Tests for JITting and collecting JNI stubs.
diff --git a/test/667-jit-jni-stub/jit_jni_stub_test.cc b/test/667-jit-jni-stub/jit_jni_stub_test.cc
new file mode 100644
index 0000000..82e06fc
--- /dev/null
+++ b/test/667-jit-jni-stub/jit_jni_stub_test.cc
@@ -0,0 +1,63 @@
+/*
+ * Copyright 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <jni.h>
+
+#include "jit/jit.h"
+#include "jit/jit_code_cache.h"
+#include "mirror/class.h"
+#include "mirror/string.h"
+#include "runtime.h"
+#include "scoped_thread_state_change-inl.h"
+
+namespace art {
+
+// Local class declared as a friend of JitCodeCache so that we can access its internals.
+class JitJniStubTestHelper {
+ public:
+  static bool isNextJitGcFull(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_) {
+    CHECK(Runtime::Current()->GetJit() != nullptr);
+    jit::JitCodeCache* cache = Runtime::Current()->GetJit()->GetCodeCache();
+    MutexLock mu(self, cache->lock_);
+    return cache->ShouldDoFullCollection();
+  }
+};
+
+// Calls through to a static method with signature "()V".
+extern "C" JNIEXPORT
+void Java_Main_callThrough(JNIEnv* env, jclass, jclass klass, jstring methodName) {
+  ScopedObjectAccess soa(Thread::Current());
+  std::string name = soa.Decode<mirror::String>(methodName)->ToModifiedUtf8();
+  jmethodID method = env->GetStaticMethodID(klass, name.c_str(), "()V");
+  CHECK(method != nullptr) << soa.Decode<mirror::Class>(klass)->PrettyDescriptor() << "." << name;
+  env->CallStaticVoidMethod(klass, method);
+}
+
+extern "C" JNIEXPORT
+void Java_Main_jitGc(JNIEnv*, jclass) {
+  CHECK(Runtime::Current()->GetJit() != nullptr);
+  jit::JitCodeCache* cache = Runtime::Current()->GetJit()->GetCodeCache();
+  ScopedObjectAccess soa(Thread::Current());
+  cache->GarbageCollectCache(Thread::Current());
+}
+
+extern "C" JNIEXPORT
+jboolean Java_Main_isNextJitGcFull(JNIEnv*, jclass) {
+  ScopedObjectAccess soa(Thread::Current());
+  return JitJniStubTestHelper::isNextJitGcFull(soa.Self());
+}
+
+}  // namespace art
diff --git a/test/667-jit-jni-stub/run b/test/667-jit-jni-stub/run
new file mode 100755
index 0000000..f235c6b
--- /dev/null
+++ b/test/667-jit-jni-stub/run
@@ -0,0 +1,19 @@
+#!/bin/bash
+#
+# Copyright (C) 2017 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Disable AOT compilation of JNI stubs (--no-prebuild --no-dex2oat).
+# Use a large initial JIT code cache so this test is not subject to unexpected code collection.
+${RUN} "${@}" --no-prebuild --no-dex2oat --runtime-option -Xjitinitialsize:32M
diff --git a/test/667-jit-jni-stub/src/Main.java b/test/667-jit-jni-stub/src/Main.java
new file mode 100644
index 0000000..b867970
--- /dev/null
+++ b/test/667-jit-jni-stub/src/Main.java
@@ -0,0 +1,180 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+  public static void main(String[] args) throws Exception {
+    System.loadLibrary(args[0]);
+    if (isAotCompiled(Main.class, "hasJit")) {
+      throw new Error("This test must be run with --no-prebuild --no-dex2oat!");
+    }
+    if (!hasJit()) {
+      return;
+    }
+
+    testCompilationUseAndCollection();
+    testMixedFramesOnStack();
+  }
+
+  public static void testCompilationUseAndCollection() {
+    // Test that callThrough() can be JIT-compiled.
+    assertFalse(hasJitCompiledEntrypoint(Main.class, "callThrough"));
+    assertFalse(hasJitCompiledCode(Main.class, "callThrough"));
+    ensureCompiledCallThroughEntrypoint(/* call */ true);
+    assertTrue(hasJitCompiledEntrypoint(Main.class, "callThrough"));
+    assertTrue(hasJitCompiledCode(Main.class, "callThrough"));
+
+    // Use callThrough() once again now that the method has a JIT-compiled stub.
+    callThrough(Main.class, "doNothing");
+
+    // Test that GC with the JIT-compiled stub on the stack does not collect it.
+    // Also tests stack walk over the JIT-compiled stub.
+    callThrough(Main.class, "testGcWithCallThroughStubOnStack");
+
+    // Test that, when marking used methods before a full JIT GC, a single execution
+    // of the GenericJNI trampoline can save the compiled stub from being collected.
+    testSingleInvocationTriggersRecompilation();
+
+    // Test that the JNI compiled stub can actually be collected.
+    testStubCanBeCollected();
+  }
+
+  public static void testGcWithCallThroughStubOnStack() {
+    // Check that this method was called via JIT-compiled callThrough() stub.
+    assertTrue(hasJitCompiledEntrypoint(Main.class, "callThrough"));
+    // This assertion also exercises stack walk over the JIT-compiled callThrough() stub.
+    assertTrue(new Throwable().getStackTrace()[1].getMethodName().equals("callThrough"));
+
+    doJitGcsUntilFullJitGcIsScheduled();
+    // The callThrough() on the stack above this method is using the compiled stub,
+    // so the JIT GC should not remove the compiled code.
+    jitGc();
+    assertTrue(hasJitCompiledCode(Main.class, "callThrough"));
+  }
+
+  public static void testSingleInvocationTriggersRecompilation() {
+    // After scheduling a full JIT GC, a single call through the GenericJNI
+    // trampoline should ensure that the compiled stub is used again.
+    doJitGcsUntilFullJitGcIsScheduled();
+    callThrough(Main.class, "doNothing");
+    ensureCompiledCallThroughEntrypoint(/* call */ false);  // Wait for the compilation task to run.
+    assertTrue(hasJitCompiledEntrypoint(Main.class, "callThrough"));
+    jitGc();  // This JIT GC should not collect the callThrough() stub.
+    assertTrue(hasJitCompiledCode(Main.class, "callThrough"));
+  }
+
+  public static void testMixedFramesOnStack() {
+    // Starts without a compiled JNI stub for callThrough().
+    assertFalse(hasJitCompiledEntrypoint(Main.class, "callThrough"));
+    assertFalse(hasJitCompiledCode(Main.class, "callThrough"));
+    callThrough(Main.class, "testMixedFramesOnStackStage2");
+    // We have just returned through the JIT-compiled JNI stub, so it must still
+    // be compiled (though not necessarily with the entrypoint pointing to it).
+    assertTrue(hasJitCompiledCode(Main.class, "callThrough"));
+    // Though callThrough() is on the stack, that frame is using the GenericJNI
+    // trampoline and does not prevent the collection of the JNI stub.
+    testStubCanBeCollected();
+  }
+
+  public static void testMixedFramesOnStackStage2() {
+    // We cannot assert that callThrough() has no JIT compiled stub as that check
+    // may race against the compilation task. Just check the caller.
+    assertTrue(new Throwable().getStackTrace()[1].getMethodName().equals("callThrough"));
+    // Now ensure that the JNI stub is compiled and used.
+    ensureCompiledCallThroughEntrypoint(/* call */ true);
+    callThrough(Main.class, "testMixedFramesOnStackStage3");
+  }
+
+  public static void testMixedFramesOnStackStage3() {
+    // Check that this method was called via JIT-compiled callThrough() stub.
+    assertTrue(hasJitCompiledEntrypoint(Main.class, "callThrough"));
+    // This assertion also exercises stack walk over the JIT-compiled callThrough() stub.
+    assertTrue(new Throwable().getStackTrace()[1].getMethodName().equals("callThrough"));
+    // For a good measure, try a JIT GC.
+    jitGc();
+  }
+
+  public static void testStubCanBeCollected() {
+    assertTrue(hasJitCompiledCode(Main.class, "callThrough"));
+    doJitGcsUntilFullJitGcIsScheduled();
+    assertFalse(hasJitCompiledEntrypoint(Main.class, "callThrough"));
+    assertTrue(hasJitCompiledCode(Main.class, "callThrough"));
+    jitGc();  // JIT GC without callThrough() on the stack should collect the callThrough() stub.
+    assertFalse(hasJitCompiledEntrypoint(Main.class, "callThrough"));
+    assertFalse(hasJitCompiledCode(Main.class, "callThrough"));
+  }
+
+  public static void doJitGcsUntilFullJitGcIsScheduled() {
+    // We enter with a compiled stub for callThrough() but we also need the entrypoint to be set.
+    assertTrue(hasJitCompiledCode(Main.class, "callThrough"));
+    ensureCompiledCallThroughEntrypoint(/* call */ true);
+    // Perform JIT GC until the next GC is marked to do full collection.
+    do {
+      assertTrue(hasJitCompiledEntrypoint(Main.class, "callThrough"));
+      callThrough(Main.class, "jitGc");  // JIT GC with callThrough() safely on the stack.
+    } while (!isNextJitGcFull());
+    // The JIT GC before the full collection resets entrypoints and waits to see
+    // if the methods are still in use.
+    assertFalse(hasJitCompiledEntrypoint(Main.class, "callThrough"));
+    assertTrue(hasJitCompiledCode(Main.class, "callThrough"));
+  }
+
+  public static void ensureCompiledCallThroughEntrypoint(boolean call) {
+    int count = 0;
+    while (!hasJitCompiledEntrypoint(Main.class, "callThrough")) {
+      // If `call` is true, also exercise the `callThrough()` method to increase hotness.
+      int limit = call ? 1 << Math.min(count, 12) : 0;
+      for (int i = 0; i < limit; ++i) {
+        callThrough(Main.class, "doNothing");
+      }
+      try {
+        // Sleep to give a chance for the JIT to compile the `callThrough` stub.
+        Thread.sleep(100);
+      } catch (Exception e) {
+        // Ignore
+      }
+      if (++count == 50) {
+        throw new Error("TIMEOUT");
+      }
+    }
+  }
+
+  public static void assertTrue(boolean value) {
+    if (!value) {
+      throw new AssertionError("Expected true!");
+    }
+  }
+
+  public static void assertFalse(boolean value) {
+    if (value) {
+      throw new AssertionError("Expected false!");
+    }
+  }
+
+  public static void doNothing() { }
+  public static void throwError() { throw new Error(); }
+
+  // Note that callThrough()'s shorty differs from the shorties of the other
+  // native methods used in this test because of its `void` return type.
+  public native static void callThrough(Class<?> cls, String methodName);
+
+  public native static void jitGc();
+  public native static boolean isNextJitGcFull();
+
+  public native static boolean isAotCompiled(Class<?> cls, String methodName);
+  public native static boolean hasJitCompiledEntrypoint(Class<?> cls, String methodName);
+  public native static boolean hasJitCompiledCode(Class<?> cls, String methodName);
+  private native static boolean hasJit();
+}
diff --git a/test/Android.bp b/test/Android.bp
index ace62c2..01e424d 100644
--- a/test/Android.bp
+++ b/test/Android.bp
@@ -385,6 +385,7 @@
         "656-annotation-lookup-generic-jni/test.cc",
         "661-oat-writer-layout/oat_writer_layout.cc",
         "664-aget-verifier/aget-verifier.cc",
+        "667-jit-jni-stub/jit_jni_stub_test.cc",
         "708-jit-cache-churn/jit.cc",
     ],
     shared_libs: [
diff --git a/test/common/runtime_state.cc b/test/common/runtime_state.cc
index df497c1..3458080 100644
--- a/test/common/runtime_state.cc
+++ b/test/common/runtime_state.cc
@@ -152,10 +152,10 @@
   return method->GetOatMethodQuickCode(kRuntimePointerSize) != nullptr;
 }
 
-extern "C" JNIEXPORT jboolean JNICALL Java_Main_isJitCompiled(JNIEnv* env,
-                                                              jclass,
-                                                              jclass cls,
-                                                              jstring method_name) {
+extern "C" JNIEXPORT jboolean JNICALL Java_Main_hasJitCompiledEntrypoint(JNIEnv* env,
+                                                                         jclass,
+                                                                         jclass cls,
+                                                                         jstring method_name) {
   jit::Jit* jit = GetJitIfEnabled();
   if (jit == nullptr) {
     return false;
@@ -169,6 +169,23 @@
   return jit->GetCodeCache()->ContainsPc(method->GetEntryPointFromQuickCompiledCode());
 }
 
+extern "C" JNIEXPORT jboolean JNICALL Java_Main_hasJitCompiledCode(JNIEnv* env,
+                                                                   jclass,
+                                                                   jclass cls,
+                                                                   jstring method_name) {
+  jit::Jit* jit = GetJitIfEnabled();
+  if (jit == nullptr) {
+    return false;
+  }
+  Thread* self = Thread::Current();
+  ScopedObjectAccess soa(self);
+  ScopedUtfChars chars(env, method_name);
+  CHECK(chars.c_str() != nullptr);
+  ArtMethod* method = soa.Decode<mirror::Class>(cls)->FindDeclaredDirectMethodByName(
+        chars.c_str(), kRuntimePointerSize);
+  return jit->GetCodeCache()->ContainsMethod(method);
+}
+
 extern "C" JNIEXPORT void JNICALL Java_Main_ensureJitCompiled(JNIEnv* env,
                                                              jclass,
                                                              jclass cls,
diff --git a/test/testrunner/testrunner.py b/test/testrunner/testrunner.py
index e750382..554b8a5 100755
--- a/test/testrunner/testrunner.py
+++ b/test/testrunner/testrunner.py
@@ -874,7 +874,7 @@
   global run_all_configs
 
   parser = argparse.ArgumentParser(description="Runs all or a subset of the ART test suite.")
-  parser.add_argument('-t', '--test', dest='test', help='name of the test')
+  parser.add_argument('-t', '--test', action='append', dest='tests', help='name(s) of the test(s)')
   parser.add_argument('-j', type=int, dest='n_thread')
   parser.add_argument('--timeout', default=timeout, type=int, dest='timeout')
   for variant in TOTAL_VARIANTS_SET:
@@ -906,10 +906,12 @@
     options = setup_env_for_build_target(target_config[options['build_target']],
                                          parser, options)
 
-  test = ''
+  tests = None
   env.EXTRA_DISABLED_TESTS.update(set(options['skips']))
-  if options['test']:
-    test = parse_test_name(options['test'])
+  if options['tests']:
+    tests = set()
+    for test_name in options['tests']:
+      tests |= parse_test_name(test_name)
 
   for variant_type in VARIANT_TYPE_DICT:
     for variant in VARIANT_TYPE_DICT[variant_type]:
@@ -935,11 +937,11 @@
   if options['run_all']:
     run_all_configs = True
 
-  return test
+  return tests
 
 def main():
   gather_test_info()
-  user_requested_test = parse_option()
+  user_requested_tests = parse_option()
   setup_test_env()
   if build:
     build_targets = ''
@@ -956,8 +958,8 @@
     build_command += ' dist'
     if subprocess.call(build_command.split()):
       sys.exit(1)
-  if user_requested_test:
-    test_runner_thread = threading.Thread(target=run_tests, args=(user_requested_test,))
+  if user_requested_tests:
+    test_runner_thread = threading.Thread(target=run_tests, args=(user_requested_tests,))
   else:
     test_runner_thread = threading.Thread(target=run_tests, args=(RUN_TEST_SET,))
   test_runner_thread.daemon = True
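With '-t' now using action='append', a single testrunner invocation can request several tests, for
example something like "./test/testrunner/testrunner.py -t 099-vmdebug -t 667-jit-jni-stub" (path
and exact test names assumed from this change); with the previous plain store action, argparse kept
only the last '-t' value.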