Merge "Setup policies for register allocation."
diff --git a/runtime/debugger.cc b/runtime/debugger.cc
index 9012f00..07d3a2a 100644
--- a/runtime/debugger.cc
+++ b/runtime/debugger.cc
@@ -4215,6 +4215,13 @@
   DISALLOW_COPY_AND_ASSIGN(StringTable);
 };
 
+static const char* GetMethodSourceFile(MethodHelper* mh)
+    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  DCHECK(mh != nullptr);
+  const char* source_file = mh->GetDeclaringClassSourceFile();
+  return (source_file != nullptr) ? source_file : "";
+}
+
 /*
  * The data we send to DDMS contains everything we have recorded.
  *
@@ -4287,7 +4294,7 @@
           mh.ChangeMethod(m);
           class_names.Add(mh.GetDeclaringClassDescriptor());
           method_names.Add(mh.GetName());
-          filenames.Add(mh.GetDeclaringClassSourceFile());
+          filenames.Add(GetMethodSourceFile(&mh));
         }
       }
 
@@ -4349,7 +4356,7 @@
         mh.ChangeMethod(record->stack[stack_frame].method);
         size_t class_name_index = class_names.IndexOf(mh.GetDeclaringClassDescriptor());
         size_t method_name_index = method_names.IndexOf(mh.GetName());
-        size_t file_name_index = filenames.IndexOf(mh.GetDeclaringClassSourceFile());
+        size_t file_name_index = filenames.IndexOf(GetMethodSourceFile(&mh));
         JDWP::Append2BE(bytes, class_name_index);
         JDWP::Append2BE(bytes, method_name_index);
         JDWP::Append2BE(bytes, file_name_index);
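
The new GetMethodSourceFile() helper exists because GetDeclaringClassSourceFile() can return nullptr for a class compiled without a SourceFile attribute, and both DDMS call sites were feeding that result straight into the string tables. A minimal, self-contained sketch of the fallback pattern (FakeMethodHelper is a hypothetical stand-in for ART's MethodHelper):

```cpp
#include <cassert>
#include <cstdio>

// Hypothetical stand-in for MethodHelper: may report no source file when
// the class was compiled without a SourceFile attribute.
struct FakeMethodHelper {
  const char* source_file;  // may be nullptr
  const char* GetDeclaringClassSourceFile() const { return source_file; }
};

// Mirrors the patch: never hand a null C string to the downstream tables.
static const char* GetMethodSourceFile(const FakeMethodHelper* mh) {
  assert(mh != nullptr);
  const char* source_file = mh->GetDeclaringClassSourceFile();
  return (source_file != nullptr) ? source_file : "";
}

int main() {
  FakeMethodHelper with_file{"Example.java"};
  FakeMethodHelper without_file{nullptr};
  std::printf("[%s] [%s]\n", GetMethodSourceFile(&with_file),
              GetMethodSourceFile(&without_file));  // [Example.java] []
}
```
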
diff --git a/runtime/deoptimize_stack_visitor.cc b/runtime/deoptimize_stack_visitor.cc
index f2eaf00..3eb1792 100644
--- a/runtime/deoptimize_stack_visitor.cc
+++ b/runtime/deoptimize_stack_visitor.cc
@@ -78,7 +78,7 @@
   if (prev_shadow_frame_ != nullptr) {
     prev_shadow_frame_->SetLink(new_frame);
   } else {
-    exception_handler_->SetTopShadowFrame(new_frame);
+    self_->SetDeoptimizationShadowFrame(new_frame);
   }
   prev_shadow_frame_ = new_frame;
   return true;
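
The visitor walks frames from the top of the stack down: the first shadow frame it creates becomes the head of the deoptimization chain, now published directly to the Thread rather than parked on the exception handler, and each later frame is linked behind its predecessor. A compilable sketch of that linking discipline, using simplified stand-in types:

```cpp
#include <initializer_list>
#include <iostream>

// Hypothetical frame with an intrusive link, like ART's ShadowFrame.
struct Frame {
  int id;
  Frame* link = nullptr;
  void SetLink(Frame* next) { link = next; }
};

struct Thread {
  Frame* deopt_head = nullptr;  // stand-in for SetDeoptimizationShadowFrame
};

// Mirrors DeoptimizeStackVisitor::VisitFrame: the first frame seen becomes
// the head; every later frame is appended behind the previous one.
void LinkFrame(Thread* self, Frame** prev, Frame* new_frame) {
  if (*prev != nullptr) {
    (*prev)->SetLink(new_frame);
  } else {
    self->deopt_head = new_frame;
  }
  *prev = new_frame;
}

int main() {
  Thread t;
  Frame* prev = nullptr;
  Frame a{0}, b{1}, c{2};
  for (Frame* f : {&a, &b, &c}) LinkFrame(&t, &prev, f);
  for (Frame* f = t.deopt_head; f != nullptr; f = f->link)
    std::cout << f->id << ' ';  // 0 1 2
  std::cout << '\n';
}
```
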
diff --git a/runtime/deoptimize_stack_visitor.h b/runtime/deoptimize_stack_visitor.h
index c898e7d..c41b803 100644
--- a/runtime/deoptimize_stack_visitor.h
+++ b/runtime/deoptimize_stack_visitor.h
@@ -19,6 +19,7 @@
 
 #include "base/mutex.h"
 #include "stack.h"
+#include "thread.h"
 
 namespace art {
 
@@ -35,6 +36,7 @@
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
       : StackVisitor(self, context), self_(self), exception_handler_(exception_handler),
         prev_shadow_frame_(nullptr) {
+    CHECK(!self_->HasDeoptimizationShadowFrame());
   }
 
   bool VisitFrame() OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
diff --git a/runtime/entrypoints/jni/jni_entrypoints.cc b/runtime/entrypoints/jni/jni_entrypoints.cc
index c0304eb..bea7d96 100644
--- a/runtime/entrypoints/jni/jni_entrypoints.cc
+++ b/runtime/entrypoints/jni/jni_entrypoints.cc
@@ -46,79 +46,4 @@
   }
 }
 
-static void WorkAroundJniBugsForJobject(intptr_t* arg_ptr)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-  intptr_t value = *arg_ptr;
-  mirror::Object** value_as_jni_rep = reinterpret_cast<mirror::Object**>(value);
-  mirror::Object* value_as_work_around_rep = value_as_jni_rep != NULL ? *value_as_jni_rep : NULL;
-  CHECK(Runtime::Current()->GetHeap()->IsValidObjectAddress(value_as_work_around_rep))
-      << value_as_work_around_rep;
-  *arg_ptr = reinterpret_cast<intptr_t>(value_as_work_around_rep);
-}
-
-extern "C" const void* artWorkAroundAppJniBugs(Thread* self, intptr_t* sp)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-  DCHECK(Thread::Current() == self);
-  // TODO: this code is specific to ARM
-  // On entry the stack pointed by sp is:
-  // | arg3   | <- Calling JNI method's frame (and extra bit for out args)
-  // | LR     |
-  // | R3     |    arg2
-  // | R2     |    arg1
-  // | R1     |    jclass/jobject
-  // | R0     |    JNIEnv
-  // | unused |
-  // | unused |
-  // | unused | <- sp
-  mirror::ArtMethod* jni_method = self->GetCurrentMethod(NULL);
-  DCHECK(jni_method->IsNative()) << PrettyMethod(jni_method);
-  intptr_t* arg_ptr = sp + 4;  // pointer to r1 on stack
-  // Fix up this/jclass argument
-  WorkAroundJniBugsForJobject(arg_ptr);
-  arg_ptr++;
-  // Fix up jobject arguments
-  MethodHelper mh(jni_method);
-  int reg_num = 2;  // Current register being processed, -1 for stack arguments.
-  for (uint32_t i = 1; i < mh.GetShortyLength(); i++) {
-    char shorty_char = mh.GetShorty()[i];
-    if (shorty_char == 'L') {
-      WorkAroundJniBugsForJobject(arg_ptr);
-    }
-    if (shorty_char == 'J' || shorty_char == 'D') {
-      if (reg_num == 2) {
-        arg_ptr = sp + 8;  // skip to out arguments
-        reg_num = -1;
-      } else if (reg_num == 3) {
-        arg_ptr = sp + 10;  // skip to out arguments plus 2 slots as long must be aligned
-        reg_num = -1;
-      } else {
-        DCHECK_EQ(reg_num, -1);
-        if ((reinterpret_cast<intptr_t>(arg_ptr) & 7) == 4) {
-          arg_ptr += 3;  // unaligned, pad and move through stack arguments
-        } else {
-          arg_ptr += 2;  // aligned, move through stack arguments
-        }
-      }
-    } else {
-      if (reg_num == 2) {
-        arg_ptr++;  // move through register arguments
-        reg_num++;
-      } else if (reg_num == 3) {
-        arg_ptr = sp + 8;  // skip to outgoing stack arguments
-        reg_num = -1;
-      } else {
-        DCHECK_EQ(reg_num, -1);
-        arg_ptr++;  // move through stack arguments
-      }
-    }
-  }
-  // Load expected destination, see Method::RegisterNative
-  const void* code = reinterpret_cast<const void*>(jni_method->GetNativeGcMap());
-  if (UNLIKELY(code == NULL)) {
-    code = GetJniDlsymLookupStub();
-    jni_method->RegisterNative(self, code, false);
-  }
-  return code;
-}
-
 }  // namespace art
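
The deleted artWorkAroundAppJniBugs shim serviced legacy apps that treated jobject as a direct mirror::Object* rather than an indirect reference: it walked the ARM argument layout using the method shorty and unwrapped one level of indirection from each reference argument in place. The core rewrite step looked roughly like this sketch (illustrative only; the real code also handled register/stack slot layout and long/double alignment):

```cpp
#include <cstdint>
#include <iostream>

struct Object { int payload; };

// Mirrors the deleted WorkAroundJniBugsForJobject: the argument slot holds
// a pointer to a pointer to the object; rewrite it to the direct pointer.
void UnwrapIndirectArg(intptr_t* arg_ptr) {
  Object** indirect = reinterpret_cast<Object**>(*arg_ptr);
  Object* direct = (indirect != nullptr) ? *indirect : nullptr;
  *arg_ptr = reinterpret_cast<intptr_t>(direct);
}

int main() {
  Object obj{42};
  Object* ref = &obj;                                // the indirect reference
  intptr_t slot = reinterpret_cast<intptr_t>(&ref);  // what buggy apps passed
  UnwrapIndirectArg(&slot);
  std::cout << reinterpret_cast<Object*>(slot)->payload << '\n';  // 42
}
```
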
diff --git a/runtime/entrypoints/portable/portable_thread_entrypoints.cc b/runtime/entrypoints/portable/portable_thread_entrypoints.cc
index 4f19964..9e62e0e 100644
--- a/runtime/entrypoints/portable/portable_thread_entrypoints.cc
+++ b/runtime/entrypoints/portable/portable_thread_entrypoints.cc
@@ -78,7 +78,7 @@
     visitor.WalkStack(true);
     self->SetDeoptimizationShadowFrame(visitor.GetShadowFrameCopy());
     self->SetDeoptimizationReturnValue(JValue());
-    self->SetException(ThrowLocation(), reinterpret_cast<mirror::Throwable*>(-1));
+    self->SetException(ThrowLocation(), Thread::GetDeoptimizationException());
   }
 }
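
Replacing the inline reinterpret_cast<mirror::Throwable*>(-1) with Thread::GetDeoptimizationException() gives the deoptimization sentinel a single definition and a self-documenting name at every comparison site. A sketch of the named-sentinel pattern, with simplified stand-ins:

```cpp
#include <cassert>

struct Throwable {};

// A named sentinel beats a bare reinterpret_cast<Throwable*>(-1) at every
// call site: one definition, self-documenting comparisons.
struct Thread {
  static Throwable* GetDeoptimizationException() {
    return reinterpret_cast<Throwable*>(-1);  // never dereferenced
  }
  Throwable* exception = nullptr;
  void SetException(Throwable* t) { exception = t; }
  bool IsDeoptimizing() const {
    return exception == GetDeoptimizationException();
  }
};

int main() {
  Thread self;
  self.SetException(Thread::GetDeoptimizationException());
  assert(self.IsDeoptimizing());
}
```
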
 
diff --git a/runtime/gc/accounting/remembered_set.cc b/runtime/gc/accounting/remembered_set.cc
index 044216e..bbbd1ed 100644
--- a/runtime/gc/accounting/remembered_set.cc
+++ b/runtime/gc/accounting/remembered_set.cc
@@ -61,9 +61,10 @@
 class RememberedSetReferenceVisitor {
  public:
   RememberedSetReferenceVisitor(MarkHeapReferenceCallback* callback,
+                                DelayReferenceReferentCallback* ref_callback,
                                 space::ContinuousSpace* target_space,
                                 bool* const contains_reference_to_target_space, void* arg)
-      : callback_(callback), target_space_(target_space), arg_(arg),
+      : callback_(callback), ref_callback_(ref_callback), target_space_(target_space), arg_(arg),
         contains_reference_to_target_space_(contains_reference_to_target_space) {}
 
   void operator()(mirror::Object* obj, MemberOffset offset, bool /* is_static */) const
@@ -77,8 +78,18 @@
     }
   }
 
+  void operator()(mirror::Class* klass, mirror::Reference* ref) const
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
+    if (target_space_->HasAddress(ref->GetReferent())) {
+      *contains_reference_to_target_space_ = true;
+      ref_callback_(klass, ref, arg_);
+    }
+  }
+
  private:
   MarkHeapReferenceCallback* const callback_;
+  DelayReferenceReferentCallback* const ref_callback_;
   space::ContinuousSpace* const target_space_;
   void* const arg_;
   bool* const contains_reference_to_target_space_;
@@ -87,30 +98,33 @@
 class RememberedSetObjectVisitor {
  public:
   RememberedSetObjectVisitor(MarkHeapReferenceCallback* callback,
+                             DelayReferenceReferentCallback* ref_callback,
                              space::ContinuousSpace* target_space,
                              bool* const contains_reference_to_target_space, void* arg)
-      : callback_(callback), target_space_(target_space), arg_(arg),
+      : callback_(callback), ref_callback_(ref_callback), target_space_(target_space), arg_(arg),
         contains_reference_to_target_space_(contains_reference_to_target_space) {}
 
   void operator()(mirror::Object* obj) const EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-    RememberedSetReferenceVisitor ref_visitor(callback_, target_space_,
-                                              contains_reference_to_target_space_, arg_);
-    obj->VisitReferences<kMovingClasses>(ref_visitor);
+    RememberedSetReferenceVisitor visitor(callback_, ref_callback_, target_space_,
+                                          contains_reference_to_target_space_, arg_);
+    obj->VisitReferences<kMovingClasses>(visitor, visitor);
   }
 
  private:
   MarkHeapReferenceCallback* const callback_;
+  DelayReferenceReferentCallback* const ref_callback_;
   space::ContinuousSpace* const target_space_;
   void* const arg_;
   bool* const contains_reference_to_target_space_;
 };
 
 void RememberedSet::UpdateAndMarkReferences(MarkHeapReferenceCallback* callback,
+                                            DelayReferenceReferentCallback* ref_callback,
                                             space::ContinuousSpace* target_space, void* arg) {
   CardTable* card_table = heap_->GetCardTable();
   bool contains_reference_to_target_space = false;
-  RememberedSetObjectVisitor obj_visitor(callback, target_space,
+  RememberedSetObjectVisitor obj_visitor(callback, ref_callback, target_space,
                                          &contains_reference_to_target_space, arg);
   ContinuousSpaceBitmap* bitmap = space_->GetLiveBitmap();
   CardSet remove_card_set;
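
The reference visitor now carries two operator() overloads, one for ordinary heap references and a new one for java.lang.ref.Reference referents, and is passed twice to VisitReferences so that each overload is selected by argument type. A minimal sketch of that dual-overload visitor shape (the object model here is a simplified stand-in, not ART's mirror types):

```cpp
#include <iostream>
#include <vector>

// Simplified stand-ins for mirror::Class, mirror::Object, mirror::Reference.
struct Class {};
struct Object;
struct Reference { Object* referent = nullptr; };
struct Object { std::vector<Object*> fields; Reference* ref = nullptr; };

// One visitor type with two operator() overloads, passed twice -- the shape
// behind obj->VisitReferences<kMovingClasses>(visitor, visitor): one
// overload handles ordinary field references, the other handles referents
// whose processing must be delayed.
struct TwoWayVisitor {
  void operator()(Object* /*obj*/, Object* field) const {
    std::cout << "field reference " << field << '\n';
  }
  void operator()(Class* /*klass*/, Reference* ref) const {
    std::cout << "delayed referent " << ref->referent << '\n';
  }
};

template <typename FieldVisitor, typename RefVisitor>
void VisitReferences(Object* obj, const FieldVisitor& fv,
                     const RefVisitor& rv) {
  for (Object* f : obj->fields) fv(obj, f);
  if (obj->ref != nullptr) {
    Class klass;  // the declaring class, simplified away here
    rv(&klass, obj->ref);
  }
}

int main() {
  Object a;
  Object b;
  Reference r{&b};
  Object root{{&a}, &r};
  TwoWayVisitor visitor;
  VisitReferences(&root, visitor, visitor);
}
```
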
diff --git a/runtime/gc/accounting/remembered_set.h b/runtime/gc/accounting/remembered_set.h
index 4ed20dd..e3d8537 100644
--- a/runtime/gc/accounting/remembered_set.h
+++ b/runtime/gc/accounting/remembered_set.h
@@ -53,6 +53,7 @@
 
   // Mark through all references to the target space.
   void UpdateAndMarkReferences(MarkHeapReferenceCallback* callback,
+                               DelayReferenceReferentCallback* ref_callback,
                                space::ContinuousSpace* target_space, void* arg)
       EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
diff --git a/runtime/gc/allocator/rosalloc.cc b/runtime/gc/allocator/rosalloc.cc
index 821aa2d..ff59016 100644
--- a/runtime/gc/allocator/rosalloc.cc
+++ b/runtime/gc/allocator/rosalloc.cc
@@ -600,8 +600,12 @@
     // Use a thread-local run.
     Run* thread_local_run = reinterpret_cast<Run*>(self->GetRosAllocRun(idx));
     // Allow invalid since this will always fail the allocation.
-    DCHECK(non_full_runs_[idx].find(thread_local_run) == non_full_runs_[idx].end());
-    DCHECK(full_runs_[idx].find(thread_local_run) == full_runs_[idx].end());
+    if (kIsDebugBuild) {
+      // Need the lock to prevent race conditions.
+      MutexLock mu(self, *size_bracket_locks_[idx]);
+      CHECK(non_full_runs_[idx].find(thread_local_run) == non_full_runs_[idx].end());
+      CHECK(full_runs_[idx].find(thread_local_run) == full_runs_[idx].end());
+    }
     DCHECK(thread_local_run != nullptr);
     DCHECK(thread_local_run->IsThreadLocal() || thread_local_run == dedicated_full_run_);
     slot_addr = thread_local_run->AllocSlot();
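
The old DCHECKs read non_full_runs_ and full_runs_ without holding the size-bracket lock, racing with other threads that mutate those sets; the fix takes the lock first, and upgrades to CHECK since the locked read is now trustworthy. A sketch of the debug-only locked-assertion pattern, using standard-library stand-ins for ART's MutexLock and CHECK:

```cpp
#include <cassert>
#include <mutex>
#include <set>

#ifndef NDEBUG
constexpr bool kIsDebugBuild = true;
#else
constexpr bool kIsDebugBuild = false;
#endif

std::mutex bracket_lock;        // stand-in for size_bracket_locks_[idx]
std::set<void*> non_full_runs;  // mutated concurrently by other threads

// Reading the set without the lock while another thread inserts or erases
// is a data race, even when the check "usually passes". The patched shape:
// take the bracket lock first, then assert (assert stands in for CHECK).
void AssertRunNotTracked(void* run) {
  if (kIsDebugBuild) {
    std::lock_guard<std::mutex> mu(bracket_lock);
    assert(non_full_runs.find(run) == non_full_runs.end());
  }
}

int main() {
  int dummy;
  AssertRunNotTracked(&dummy);
}
```
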
diff --git a/runtime/gc/collector/garbage_collector.cc b/runtime/gc/collector/garbage_collector.cc
index 6380cba..615ec98 100644
--- a/runtime/gc/collector/garbage_collector.cc
+++ b/runtime/gc/collector/garbage_collector.cc
@@ -65,6 +65,7 @@
   ThreadList* thread_list = Runtime::Current()->GetThreadList();
   Thread* self = Thread::Current();
   uint64_t start_time = NanoTime();
+  timings_.Reset();
   pause_times_.clear();
   duration_ns_ = 0;
   clear_soft_references_ = clear_soft_references;
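
Hoisting timings_.Reset() into GarbageCollector::Run() means every collector resets its timings exactly once per collection, so the duplicate calls in MarkSweep::InitializePhase() and SemiSpace::InitializePhase() can be deleted (see the next two hunks). A sketch of the hoist as a template-method refactor, with vectors standing in for TimingLogger:

```cpp
#include <iostream>
#include <vector>

// Run() owns the per-collection reset, so every collector (and any future
// subclass) gets it exactly once, and subclass InitializePhase()
// implementations can no longer forget or duplicate it.
class GarbageCollector {
 public:
  virtual ~GarbageCollector() = default;
  void Run() {
    timings_.clear();  // was duplicated in each InitializePhase()
    pause_times_.clear();
    InitializePhase();
    // ... marking, sweeping, and finish phases elided ...
  }
 protected:
  virtual void InitializePhase() = 0;
  std::vector<const char*> timings_;
  std::vector<unsigned> pause_times_;
};

class MarkSweep : public GarbageCollector {
  void InitializePhase() override { timings_.push_back("InitializePhase"); }
};

int main() {
  MarkSweep gc;
  gc.Run();
  gc.Run();  // timings reset each cycle, not accumulated across cycles
  std::cout << "ok\n";
}
```
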
diff --git a/runtime/gc/collector/mark_sweep.cc b/runtime/gc/collector/mark_sweep.cc
index 8af4fd8..007eb23 100644
--- a/runtime/gc/collector/mark_sweep.cc
+++ b/runtime/gc/collector/mark_sweep.cc
@@ -104,7 +104,6 @@
 }
 
 void MarkSweep::InitializePhase() {
-  timings_.Reset();
   TimingLogger::ScopedSplit split("InitializePhase", &timings_);
   mark_stack_ = heap_->mark_stack_.get();
   DCHECK(mark_stack_ != nullptr);
diff --git a/runtime/gc/collector/semi_space.cc b/runtime/gc/collector/semi_space.cc
index b67bbb1..3b9e853 100644
--- a/runtime/gc/collector/semi_space.cc
+++ b/runtime/gc/collector/semi_space.cc
@@ -110,7 +110,6 @@
 }
 
 void SemiSpace::InitializePhase() {
-  timings_.Reset();
   TimingLogger::ScopedSplit split("InitializePhase", &timings_);
   mark_stack_ = heap_->mark_stack_.get();
   DCHECK(mark_stack_ != nullptr);
@@ -334,7 +333,8 @@
       accounting::RememberedSet* rem_set = heap_->FindRememberedSetFromSpace(space);
       if (kUseRememberedSet) {
         DCHECK(rem_set != nullptr);
-        rem_set->UpdateAndMarkReferences(MarkHeapReferenceCallback, from_space_, this);
+        rem_set->UpdateAndMarkReferences(MarkHeapReferenceCallback, DelayReferenceReferentCallback,
+                                         from_space_, this);
         if (kIsDebugBuild) {
           // Verify that there are no from-space references that
           // remain in the space, that is, the remembered set (and the
@@ -603,6 +603,11 @@
   reinterpret_cast<SemiSpace*>(arg)->MarkObject(obj_ptr);
 }
 
+void SemiSpace::DelayReferenceReferentCallback(mirror::Class* klass, mirror::Reference* ref,
+                                               void* arg) {
+  reinterpret_cast<SemiSpace*>(arg)->DelayReferenceReferent(klass, ref);
+}
+
 void SemiSpace::MarkRootCallback(Object** root, void* arg, uint32_t /*thread_id*/,
                                  RootType /*root_type*/) {
   auto ref = StackReference<mirror::Object>::FromMirrorPtr(*root);
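
DelayReferenceReferentCallback is the usual C-callback trampoline: a static function that recovers the collector instance from the opaque void* argument and forwards to the member function. A compilable sketch of the shape, with empty stand-in mirror types:

```cpp
#include <iostream>

struct Class {};
struct Reference {};

// C-style function-type typedef, as in object_callbacks.h.
typedef void (DelayReferenceReferentCallback)(Class* klass, Reference* ref,
                                              void* arg);

class SemiSpace {
 public:
  // The trampoline: recover the collector instance from the opaque arg and
  // forward to the member function. This is how a C callback API reaches
  // C++ instance state.
  static void DelayReferenceReferentCallback(Class* klass, Reference* ref,
                                             void* arg) {
    reinterpret_cast<SemiSpace*>(arg)->DelayReferenceReferent(klass, ref);
  }
  void DelayReferenceReferent(Class*, Reference*) {
    std::cout << "referent delayed\n";
  }
};

void InvokeLater(::DelayReferenceReferentCallback* cb, void* arg) {
  Class klass;
  Reference ref;
  cb(&klass, &ref, arg);
}

int main() {
  SemiSpace collector;
  InvokeLater(&SemiSpace::DelayReferenceReferentCallback, &collector);
}
```
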
diff --git a/runtime/gc/collector/semi_space.h b/runtime/gc/collector/semi_space.h
index 3d635f0..51b0869 100644
--- a/runtime/gc/collector/semi_space.h
+++ b/runtime/gc/collector/semi_space.h
@@ -138,6 +138,10 @@
   static void ProcessMarkStackCallback(void* arg)
       EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
 
+  static void DelayReferenceReferentCallback(mirror::Class* klass, mirror::Reference* ref,
+                                             void* arg)
+      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
+
   virtual mirror::Object* MarkNonForwardedObject(mirror::Object* obj)
       EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 5d72bc1..a9799b9 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -1910,36 +1910,37 @@
   EnqueueClearedReferences();
   // Grow the heap so that we know when to perform the next GC.
   GrowForUtilization(collector);
-  if (CareAboutPauseTimes()) {
-    const size_t duration = collector->GetDurationNs();
-    std::vector<uint64_t> pauses = collector->GetPauseTimes();
+  const size_t duration = collector->GetDurationNs();
+  const std::vector<uint64_t>& pause_times = collector->GetPauseTimes();
+  // Print the GC if it is an explicit GC (e.g. Runtime.gc()) or a slow GC
+  // (mutator time blocked >= long_pause_log_threshold_).
+  bool log_gc = gc_cause == kGcCauseExplicit;
+  if (!log_gc && CareAboutPauseTimes()) {
     // GC for alloc pauses the allocating thread, so consider it as a pause.
-    bool was_slow = duration > long_gc_log_threshold_ ||
+    log_gc = duration > long_gc_log_threshold_ ||
         (gc_cause == kGcCauseForAlloc && duration > long_pause_log_threshold_);
-    if (!was_slow) {
-      for (uint64_t pause : pauses) {
-        was_slow = was_slow || pause > long_pause_log_threshold_;
-      }
+    for (uint64_t pause : pause_times) {
+      log_gc = log_gc || pause >= long_pause_log_threshold_;
     }
-    if (was_slow) {
-        const size_t percent_free = GetPercentFree();
-        const size_t current_heap_size = GetBytesAllocated();
-        const size_t total_memory = GetTotalMemory();
-        std::ostringstream pause_string;
-        for (size_t i = 0; i < pauses.size(); ++i) {
-            pause_string << PrettyDuration((pauses[i] / 1000) * 1000)
-                         << ((i != pauses.size() - 1) ? ", " : "");
-        }
-        LOG(INFO) << gc_cause << " " << collector->GetName()
-                  << " GC freed "  <<  collector->GetFreedObjects() << "("
-                  << PrettySize(collector->GetFreedBytes()) << ") AllocSpace objects, "
-                  << collector->GetFreedLargeObjects() << "("
-                  << PrettySize(collector->GetFreedLargeObjectBytes()) << ") LOS objects, "
-                  << percent_free << "% free, " << PrettySize(current_heap_size) << "/"
-                  << PrettySize(total_memory) << ", " << "paused " << pause_string.str()
-                  << " total " << PrettyDuration((duration / 1000) * 1000);
-        VLOG(heap) << ConstDumpable<TimingLogger>(collector->GetTimings());
+  }
+  if (log_gc) {
+    const size_t percent_free = GetPercentFree();
+    const size_t current_heap_size = GetBytesAllocated();
+    const size_t total_memory = GetTotalMemory();
+    std::ostringstream pause_string;
+    for (size_t i = 0; i < pause_times.size(); ++i) {
+      pause_string << PrettyDuration((pause_times[i] / 1000) * 1000)
+                   << ((i != pause_times.size() - 1) ? ", " : "");
     }
+    LOG(INFO) << gc_cause << " " << collector->GetName()
+              << " GC freed "  <<  collector->GetFreedObjects() << "("
+              << PrettySize(collector->GetFreedBytes()) << ") AllocSpace objects, "
+              << collector->GetFreedLargeObjects() << "("
+              << PrettySize(collector->GetFreedLargeObjectBytes()) << ") LOS objects, "
+              << percent_free << "% free, " << PrettySize(current_heap_size) << "/"
+              << PrettySize(total_memory) << ", " << "paused " << pause_string.str()
+              << " total " << PrettyDuration((duration / 1000) * 1000);
+    VLOG(heap) << ConstDumpable<TimingLogger>(collector->GetTimings());
   }
   FinishGC(self, gc_type);
   ATRACE_END();
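
The restructured logging predicate always logs explicit GCs (e.g. Runtime.gc()), and otherwise logs only when pause times matter and the collection was slow, either in total duration or in any single pause. The decision, extracted into a standalone sketch function (thresholds in nanoseconds):

```cpp
#include <cstdint>
#include <iostream>
#include <vector>

enum GcCause { kGcCauseExplicit, kGcCauseForAlloc, kGcCauseBackground };

// The patch's decision logic: explicit GCs always log; otherwise a GC logs
// only when pauses matter and it was slow in total or in any single pause.
bool ShouldLogGc(GcCause cause, bool care_about_pause_times,
                 uint64_t duration_ns, const std::vector<uint64_t>& pauses,
                 uint64_t long_gc_ns, uint64_t long_pause_ns) {
  bool log_gc = (cause == kGcCauseExplicit);
  if (!log_gc && care_about_pause_times) {
    // GC-for-alloc blocks the allocating thread, so treat it as a pause.
    log_gc = duration_ns > long_gc_ns ||
             (cause == kGcCauseForAlloc && duration_ns > long_pause_ns);
    for (uint64_t pause : pauses) {
      log_gc = log_gc || pause >= long_pause_ns;
    }
  }
  return log_gc;
}

int main() {
  std::cout << ShouldLogGc(kGcCauseExplicit, false, 0, {}, 100, 10)    // 1
            << ShouldLogGc(kGcCauseBackground, true, 50, {12}, 100, 10)  // 1
            << ShouldLogGc(kGcCauseBackground, true, 50, {5}, 100, 10)   // 0
            << '\n';
}
```
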
diff --git a/runtime/object_callbacks.h b/runtime/object_callbacks.h
index 9198c90..767c197 100644
--- a/runtime/object_callbacks.h
+++ b/runtime/object_callbacks.h
@@ -24,8 +24,10 @@
 
 namespace art {
 namespace mirror {
+class Class;
 class Object;
 template<class MirrorType> class HeapReference;
+class Reference;
 }  // namespace mirror
 class StackVisitor;
 
@@ -59,6 +61,7 @@
     const StackVisitor* visitor, RootType root_type);
 
 typedef void (MarkHeapReferenceCallback)(mirror::HeapReference<mirror::Object>* ref, void* arg);
+typedef void (DelayReferenceReferentCallback)(mirror::Class* klass, mirror::Reference* ref, void* arg);
 
 // A callback for testing if an object is marked, returns nullptr if not marked, otherwise the new
 // address the object (if the object didn't move, returns the object input parameter).
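
Note that these typedefs name function types, not function pointers, matching the existing MarkHeapReferenceCallback style, so call sites declare parameters as `Callback* cb`. A sketch of the two-callback registration shape introduced by this change (signatures simplified; the real mark callback takes mirror::HeapReference<mirror::Object>*):

```cpp
#include <iostream>

namespace mirror { class Class {}; class Reference {}; }

// Function-type typedefs in the header's style; `void*` here is a
// simplification of the real HeapReference parameter.
typedef void (MarkHeapReferenceCallback)(void* ref, void* arg);
typedef void (DelayReferenceReferentCallback)(mirror::Class* klass,
                                              mirror::Reference* ref,
                                              void* arg);

void MyMark(void*, void*) { std::cout << "mark\n"; }
void MyDelay(mirror::Class*, mirror::Reference*, void*) {
  std::cout << "delay\n";
}

// The extra ref_callback parameter added by the patch, in miniature.
void UpdateAndMark(MarkHeapReferenceCallback* cb,
                   DelayReferenceReferentCallback* ref_cb, void* arg) {
  cb(nullptr, arg);
  mirror::Class klass;
  mirror::Reference ref;
  ref_cb(&klass, &ref, arg);
}

int main() { UpdateAndMark(&MyMark, &MyDelay, nullptr); }
```
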
diff --git a/runtime/quick_exception_handler.cc b/runtime/quick_exception_handler.cc
index d5844b6..a91fdf1 100644
--- a/runtime/quick_exception_handler.cc
+++ b/runtime/quick_exception_handler.cc
@@ -28,7 +28,7 @@
     method_tracing_active_(is_deoptimization ||
                            Runtime::Current()->GetInstrumentation()->AreExitStubsInstalled()),
     handler_quick_frame_(nullptr), handler_quick_frame_pc_(0), handler_dex_pc_(0),
-    clear_exception_(false), top_shadow_frame_(nullptr), handler_frame_id_(kInvalidFrameId) {
+    clear_exception_(false), handler_frame_id_(kInvalidFrameId) {
 }
 
 void QuickExceptionHandler::FindCatch(const ThrowLocation& throw_location,
@@ -125,10 +125,6 @@
 }
 
 void QuickExceptionHandler::DoLongJump() {
-  if (is_deoptimization_) {
-    // TODO: proper return value.
-    self_->SetDeoptimizationShadowFrame(top_shadow_frame_);
-  }
   // Place context back on thread so it will be available when we continue.
   self_->ReleaseLongJumpContext(context_);
   context_->SetSP(reinterpret_cast<uintptr_t>(handler_quick_frame_));
diff --git a/runtime/quick_exception_handler.h b/runtime/quick_exception_handler.h
index d06ce7c..ef3766c 100644
--- a/runtime/quick_exception_handler.h
+++ b/runtime/quick_exception_handler.h
@@ -66,10 +66,6 @@
     clear_exception_ = clear_exception;
   }
 
-  void SetTopShadowFrame(ShadowFrame* top_shadow_frame) {
-    top_shadow_frame_ = top_shadow_frame;
-  }
-
   void SetHandlerFrameId(size_t frame_id) {
     handler_frame_id_ = frame_id;
   }
@@ -88,8 +84,6 @@
   uint32_t handler_dex_pc_;
   // Should the exception be cleared as the catch block has no move-exception?
   bool clear_exception_;
-  // Deoptimization top shadow frame.
-  ShadowFrame* top_shadow_frame_;
   // Frame id of the catch handler or the upcall.
   size_t handler_frame_id_;
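
With the chain head published by DeoptimizeStackVisitor straight onto the Thread, QuickExceptionHandler no longer needs to carry top_shadow_frame_ through to DoLongJump(), removing one hand-off that could silently go stale. A sketch of the ownership move, with simplified stand-ins:

```cpp
#include <cassert>

struct ShadowFrame {};

// After the patch, the deoptimization frame chain lives on the Thread, set
// by the stack visitor as it builds the chain, so the handler carries no
// top_shadow_frame_ member at all.
class Thread {
 public:
  void SetDeoptimizationShadowFrame(ShadowFrame* sf) { deopt_frame_ = sf; }
  bool HasDeoptimizationShadowFrame() const { return deopt_frame_ != nullptr; }
 private:
  ShadowFrame* deopt_frame_ = nullptr;
};

class QuickExceptionHandler {
 public:
  explicit QuickExceptionHandler(Thread* self) : self_(self) {}
  void DoLongJump() {
    // No SetDeoptimizationShadowFrame(top_shadow_frame_) here anymore:
    // the visitor already published the chain directly to self_.
    assert(self_ != nullptr);
  }
 private:
  Thread* self_;
};

int main() {
  Thread self;
  ShadowFrame top;
  self.SetDeoptimizationShadowFrame(&top);  // done by the visitor now
  QuickExceptionHandler handler(&self);
  handler.DoLongJump();
  assert(self.HasDeoptimizationShadowFrame());
}
```
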
 
diff --git a/runtime/thread.cc b/runtime/thread.cc
index 771680b..0fafbfa 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -1932,92 +1932,102 @@
   bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     if (false) {
       LOG(INFO) << "Visiting stack roots in " << PrettyMethod(GetMethod())
-          << StringPrintf("@ PC:%04x", GetDexPc());
+                << StringPrintf("@ PC:%04x", GetDexPc());
     }
     ShadowFrame* shadow_frame = GetCurrentShadowFrame();
     if (shadow_frame != nullptr) {
-      mirror::ArtMethod* m = shadow_frame->GetMethod();
-      size_t num_regs = shadow_frame->NumberOfVRegs();
-      if (m->IsNative() || shadow_frame->HasReferenceArray()) {
-        // SIRT for JNI or References for interpreter.
-        for (size_t reg = 0; reg < num_regs; ++reg) {
+      VisitShadowFrame(shadow_frame);
+    } else {
+      VisitQuickFrame();
+    }
+    return true;
+  }
+
+  void VisitShadowFrame(ShadowFrame* shadow_frame) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    mirror::ArtMethod* m = shadow_frame->GetMethod();
+    size_t num_regs = shadow_frame->NumberOfVRegs();
+    if (m->IsNative() || shadow_frame->HasReferenceArray()) {
+      // SIRT for JNI or References for interpreter.
+      for (size_t reg = 0; reg < num_regs; ++reg) {
+        mirror::Object* ref = shadow_frame->GetVRegReference(reg);
+        if (ref != nullptr) {
+          mirror::Object* new_ref = ref;
+          visitor_(&new_ref, reg, this);
+          if (new_ref != ref) {
+            shadow_frame->SetVRegReference(reg, new_ref);
+          }
+        }
+      }
+    } else {
+      // Java method.
+      // Portable path use DexGcMap and store in Method.native_gc_map_.
+      const uint8_t* gc_map = m->GetNativeGcMap();
+      CHECK(gc_map != nullptr) << PrettyMethod(m);
+      verifier::DexPcToReferenceMap dex_gc_map(gc_map);
+      uint32_t dex_pc = shadow_frame->GetDexPC();
+      const uint8_t* reg_bitmap = dex_gc_map.FindBitMap(dex_pc);
+      DCHECK(reg_bitmap != nullptr);
+      num_regs = std::min(dex_gc_map.RegWidth() * 8, num_regs);
+      for (size_t reg = 0; reg < num_regs; ++reg) {
+        if (TestBitmap(reg, reg_bitmap)) {
           mirror::Object* ref = shadow_frame->GetVRegReference(reg);
           if (ref != nullptr) {
             mirror::Object* new_ref = ref;
             visitor_(&new_ref, reg, this);
             if (new_ref != ref) {
-             shadow_frame->SetVRegReference(reg, new_ref);
-            }
-          }
-        }
-      } else {
-        // Java method.
-        // Portable path use DexGcMap and store in Method.native_gc_map_.
-        const uint8_t* gc_map = m->GetNativeGcMap();
-        CHECK(gc_map != nullptr) << PrettyMethod(m);
-        verifier::DexPcToReferenceMap dex_gc_map(gc_map);
-        uint32_t dex_pc = GetDexPc();
-        const uint8_t* reg_bitmap = dex_gc_map.FindBitMap(dex_pc);
-        DCHECK(reg_bitmap != nullptr);
-        num_regs = std::min(dex_gc_map.RegWidth() * 8, num_regs);
-        for (size_t reg = 0; reg < num_regs; ++reg) {
-          if (TestBitmap(reg, reg_bitmap)) {
-            mirror::Object* ref = shadow_frame->GetVRegReference(reg);
-            if (ref != nullptr) {
-              mirror::Object* new_ref = ref;
-              visitor_(&new_ref, reg, this);
-              if (new_ref != ref) {
-               shadow_frame->SetVRegReference(reg, new_ref);
-              }
+              shadow_frame->SetVRegReference(reg, new_ref);
             }
           }
         }
       }
-    } else {
-      mirror::ArtMethod* m = GetMethod();
-      // Process register map (which native and runtime methods don't have)
-      if (!m->IsNative() && !m->IsRuntimeMethod() && !m->IsProxyMethod()) {
-        const uint8_t* native_gc_map = m->GetNativeGcMap();
-        CHECK(native_gc_map != nullptr) << PrettyMethod(m);
-        mh_.ChangeMethod(m);
-        const DexFile::CodeItem* code_item = mh_.GetCodeItem();
-        DCHECK(code_item != nullptr) << PrettyMethod(m);  // Can't be nullptr or how would we compile its instructions?
-        NativePcOffsetToReferenceMap map(native_gc_map);
-        size_t num_regs = std::min(map.RegWidth() * 8,
-                                   static_cast<size_t>(code_item->registers_size_));
-        if (num_regs > 0) {
-          const uint8_t* reg_bitmap = map.FindBitMap(GetNativePcOffset());
-          DCHECK(reg_bitmap != nullptr);
-          const VmapTable vmap_table(m->GetVmapTable());
-          uint32_t core_spills = m->GetCoreSpillMask();
-          uint32_t fp_spills = m->GetFpSpillMask();
-          size_t frame_size = m->GetFrameSizeInBytes();
-          // For all dex registers in the bitmap
-          mirror::ArtMethod** cur_quick_frame = GetCurrentQuickFrame();
-          DCHECK(cur_quick_frame != nullptr);
-          for (size_t reg = 0; reg < num_regs; ++reg) {
-            // Does this register hold a reference?
-            if (TestBitmap(reg, reg_bitmap)) {
-              uint32_t vmap_offset;
-              if (vmap_table.IsInContext(reg, kReferenceVReg, &vmap_offset)) {
-                int vmap_reg = vmap_table.ComputeRegister(core_spills, vmap_offset, kReferenceVReg);
-                // This is sound as spilled GPRs will be word sized (ie 32 or 64bit).
-                mirror::Object** ref_addr = reinterpret_cast<mirror::Object**>(GetGPRAddress(vmap_reg));
-                if (*ref_addr != nullptr) {
-                  visitor_(ref_addr, reg, this);
-                }
-              } else {
-                StackReference<mirror::Object>* ref_addr =
-                    reinterpret_cast<StackReference<mirror::Object>*>(
-                        GetVRegAddr(cur_quick_frame, code_item, core_spills, fp_spills, frame_size,
-                                    reg));
-                mirror::Object* ref = ref_addr->AsMirrorPtr();
-                if (ref != nullptr) {
-                  mirror::Object* new_ref = ref;
-                  visitor_(&new_ref, reg, this);
-                  if (ref != new_ref) {
-                    ref_addr->Assign(new_ref);
-                  }
+    }
+  }
+
+ private:
+  void VisitQuickFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    mirror::ArtMethod* m = GetMethod();
+    // Process register map (which native and runtime methods don't have)
+    if (!m->IsNative() && !m->IsRuntimeMethod() && !m->IsProxyMethod()) {
+      const uint8_t* native_gc_map = m->GetNativeGcMap();
+      CHECK(native_gc_map != nullptr) << PrettyMethod(m);
+      mh_.ChangeMethod(m);
+      const DexFile::CodeItem* code_item = mh_.GetCodeItem();
+      DCHECK(code_item != nullptr) << PrettyMethod(m);  // Can't be nullptr or how would we compile its instructions?
+      NativePcOffsetToReferenceMap map(native_gc_map);
+      size_t num_regs = std::min(map.RegWidth() * 8,
+                                 static_cast<size_t>(code_item->registers_size_));
+      if (num_regs > 0) {
+        const uint8_t* reg_bitmap = map.FindBitMap(GetNativePcOffset());
+        DCHECK(reg_bitmap != nullptr);
+        const VmapTable vmap_table(m->GetVmapTable());
+        uint32_t core_spills = m->GetCoreSpillMask();
+        uint32_t fp_spills = m->GetFpSpillMask();
+        size_t frame_size = m->GetFrameSizeInBytes();
+        // For all dex registers in the bitmap
+        mirror::ArtMethod** cur_quick_frame = GetCurrentQuickFrame();
+        DCHECK(cur_quick_frame != nullptr);
+        for (size_t reg = 0; reg < num_regs; ++reg) {
+          // Does this register hold a reference?
+          if (TestBitmap(reg, reg_bitmap)) {
+            uint32_t vmap_offset;
+            if (vmap_table.IsInContext(reg, kReferenceVReg, &vmap_offset)) {
+              int vmap_reg = vmap_table.ComputeRegister(core_spills, vmap_offset, kReferenceVReg);
+              // This is sound as spilled GPRs will be word sized (ie 32 or 64bit).
+              mirror::Object** ref_addr = reinterpret_cast<mirror::Object**>(GetGPRAddress(vmap_reg));
+              if (*ref_addr != nullptr) {
+                visitor_(ref_addr, reg, this);
+              }
+            } else {
+              StackReference<mirror::Object>* ref_addr =
+                  reinterpret_cast<StackReference<mirror::Object>*>(
+                      GetVRegAddr(cur_quick_frame, code_item, core_spills, fp_spills, frame_size,
+                                  reg));
+              mirror::Object* ref = ref_addr->AsMirrorPtr();
+              if (ref != nullptr) {
+                mirror::Object* new_ref = ref;
+                visitor_(&new_ref, reg, this);
+                if (ref != new_ref) {
+                  ref_addr->Assign(new_ref);
                 }
               }
             }
@@ -2025,10 +2035,8 @@
         }
       }
     }
-    return true;
   }
 
- private:
   static bool TestBitmap(size_t reg, const uint8_t* reg_vector) {
     return ((reg_vector[reg / kBitsPerByte] >> (reg % kBitsPerByte)) & 0x01) != 0;
   }
@@ -2085,6 +2093,14 @@
   if (tlsPtr_.single_step_control != nullptr) {
     tlsPtr_.single_step_control->VisitRoots(visitor, arg, thread_id, kRootDebugger);
   }
+  if (tlsPtr_.deoptimization_shadow_frame != nullptr) {
+    RootCallbackVisitor visitorToCallback(visitor, arg, thread_id);
+    ReferenceMapVisitor<RootCallbackVisitor> mapper(this, nullptr, visitorToCallback);
+    for (ShadowFrame* shadow_frame = tlsPtr_.deoptimization_shadow_frame; shadow_frame != nullptr;
+        shadow_frame = shadow_frame->GetLink()) {
+      mapper.VisitShadowFrame(shadow_frame);
+    }
+  }
   // Visit roots on this thread's stack
   Context* context = GetLongJumpContext();
   RootCallbackVisitor visitorToCallback(visitor, arg, thread_id);
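
Splitting the old monolithic VisitFrame() into VisitShadowFrame() and VisitQuickFrame() is what makes the new root-visiting loop possible: a pending deoptimization chain is not on the active stack, so its frames can only be reached by calling the extracted per-frame visitor directly while following the links. A sketch of that reuse, with a simplified shadow frame:

```cpp
#include <iostream>
#include <vector>

struct Object { bool marked = false; };

// Stand-in for ShadowFrame: vreg references plus the intrusive link the
// deoptimization visitor built.
struct ShadowFrame {
  std::vector<Object*> vregs;
  ShadowFrame* link = nullptr;
  ShadowFrame* GetLink() const { return link; }
};

// The extracted VisitShadowFrame: visiting one frame's references is now a
// reusable unit, so root visiting can walk frames that are *not* on the
// active stack. Marking here stands in for the visitor_ callback, which in
// ART may also update the reference.
void VisitShadowFrame(ShadowFrame* frame) {
  for (Object* ref : frame->vregs) {
    if (ref != nullptr) ref->marked = true;
  }
}

void VisitDeoptimizationRoots(ShadowFrame* head) {
  for (ShadowFrame* sf = head; sf != nullptr; sf = sf->GetLink()) {
    VisitShadowFrame(sf);
  }
}

int main() {
  Object a, b;
  ShadowFrame inner{{&b}};
  ShadowFrame outer{{&a, nullptr}, &inner};
  VisitDeoptimizationRoots(&outer);
  std::cout << a.marked << b.marked << '\n';  // 11
}
```
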
diff --git a/runtime/thread.h b/runtime/thread.h
index e5e4cae..f869285 100644
--- a/runtime/thread.h
+++ b/runtime/thread.h
@@ -696,6 +696,10 @@
 
   ShadowFrame* GetAndClearDeoptimizationShadowFrame(JValue* ret_val);
 
+  bool HasDeoptimizationShadowFrame() const {
+    return tlsPtr_.deoptimization_shadow_frame != nullptr;
+  }
+
   std::deque<instrumentation::InstrumentationStackFrame>* GetInstrumentationStack() {
     return tlsPtr_.instrumentation_stack;
   }
diff --git a/test/ThreadStress/ThreadStress.java b/test/ThreadStress/ThreadStress.java
index 795c790..5dccc68 100644
--- a/test/ThreadStress/ThreadStress.java
+++ b/test/ThreadStress/ThreadStress.java
@@ -14,13 +14,15 @@
  * limitations under the License.
  */
 
+import android.system.ErrnoException;
+import android.system.Os;
+import android.system.OsConstants;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collections;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
-import libcore.io.*;
 
 // Run on host with:
 //   javac ThreadStress.java && java ThreadStress && rm *.class
@@ -202,7 +204,7 @@
                     }
                     case SIGQUIT: {
                         try {
-                            Libcore.os.kill(Libcore.os.getpid(), OsConstants.SIGQUIT);
+                            Os.kill(Os.getpid(), OsConstants.SIGQUIT);
                         } catch (ErrnoException ex) {
                         }
                     }