Replace StackHandleScopeCollection with VariableSizedHandleScope

VariableSizedHandleScope's internal handle scopes are not pushed
directly onto the thread; only the outer scope is. This makes it safe
to intermix a VariableSizedHandleScope with other types of handle
scopes.
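
For illustration, a minimal usage sketch (the soa/class_linker setup is
assumed from the new test in handle_scope_test.cc, not part of the API):

  VariableSizedHandleScope hs(soa.Self());
  Handle<mirror::Class> c =
      hs.NewHandle(class_linker->FindSystemClass(soa.Self(), "Ljava/lang/Object;"));
  // A fixed size scope nested inside the variable sized one. Only hs and
  // inner are pushed on the thread; hs's internal scopes are heap allocated
  // and owned by hs itself.
  StackHandleScope<1> inner(soa.Self());
  inner.NewHandle(c->AllocObject(soa.Self()));
  // Growing hs past its current internal scope is safe even while inner is
  // live, since no new scope is pushed above inner on the thread.
  hs.NewHandle(c->AllocObject(soa.Self()));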

Added test.

Test: clean-oat-host && test-art-host

Change-Id: Id2fd1155788428f394d49615d337d9134824c8f0
diff --git a/runtime/debugger.cc b/runtime/debugger.cc
index d8a6ba9..502ce4b 100644
--- a/runtime/debugger.cc
+++ b/runtime/debugger.cc
@@ -892,7 +892,7 @@
                                        std::vector<uint64_t>* counts) {
   gc::Heap* heap = Runtime::Current()->GetHeap();
   heap->CollectGarbage(false);
-  StackHandleScopeCollection hs(Thread::Current());
+  VariableSizedHandleScope hs(Thread::Current());
   std::vector<Handle<mirror::Class>> classes;
   counts->clear();
   for (size_t i = 0; i < class_ids.size(); ++i) {
diff --git a/runtime/handle_scope-inl.h b/runtime/handle_scope-inl.h
index cceb007..b212d09 100644
--- a/runtime/handle_scope-inl.h
+++ b/runtime/handle_scope-inl.h
@@ -28,24 +28,30 @@
 namespace art {
 
 template<size_t kNumReferences>
-inline StackHandleScope<kNumReferences>::StackHandleScope(Thread* self, mirror::Object* fill_value)
-    : HandleScope(self->GetTopHandleScope(), kNumReferences), self_(self), pos_(0) {
-  DCHECK_EQ(self, Thread::Current());
+inline FixedSizeHandleScope<kNumReferences>::FixedSizeHandleScope(BaseHandleScope* link,
+                                                                  mirror::Object* fill_value)
+    : HandleScope(link, kNumReferences) {
   if (kDebugLocking) {
     Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
   }
-  static_assert(kNumReferences >= 1, "StackHandleScope must contain at least 1 reference");
-  // TODO: Figure out how to use a compile assert.
-  CHECK_EQ(&storage_[0], GetReferences());
+  static_assert(kNumReferences >= 1, "FixedSizeHandleScope must contain at least 1 reference");
+  DCHECK_EQ(&storage_[0], GetReferences());  // TODO: Figure out how to use a compile assert.
   for (size_t i = 0; i < kNumReferences; ++i) {
     SetReference(i, fill_value);
   }
+}
+
+template<size_t kNumReferences>
+inline StackHandleScope<kNumReferences>::StackHandleScope(Thread* self, mirror::Object* fill_value)
+    : FixedSizeHandleScope<kNumReferences>(self->GetTopHandleScope(), fill_value),
+      self_(self) {
+  DCHECK_EQ(self, Thread::Current());
   self_->PushHandleScope(this);
 }
 
 template<size_t kNumReferences>
 inline StackHandleScope<kNumReferences>::~StackHandleScope() {
-  HandleScope* top_handle_scope = self_->PopHandleScope();
+  BaseHandleScope* top_handle_scope = self_->PopHandleScope();
   DCHECK_EQ(top_handle_scope, this);
   if (kDebugLocking) {
     Locks::mutator_lock_->AssertSharedHeld(self_);
@@ -66,7 +72,7 @@
 }
 
 inline mirror::Object* HandleScope::GetReference(size_t i) const {
-  DCHECK_LT(i, number_of_references_);
+  DCHECK_LT(i, NumberOfReferences());
   if (kDebugLocking) {
     Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
   }
@@ -74,12 +80,12 @@
 }
 
 inline Handle<mirror::Object> HandleScope::GetHandle(size_t i) {
-  DCHECK_LT(i, number_of_references_);
+  DCHECK_LT(i, NumberOfReferences());
   return Handle<mirror::Object>(&GetReferences()[i]);
 }
 
 inline MutableHandle<mirror::Object> HandleScope::GetMutableHandle(size_t i) {
-  DCHECK_LT(i, number_of_references_);
+  DCHECK_LT(i, NumberOfReferences());
   return MutableHandle<mirror::Object>(&GetReferences()[i]);
 }
 
@@ -87,7 +93,7 @@
   if (kDebugLocking) {
     Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
   }
-  DCHECK_LT(i, number_of_references_);
+  DCHECK_LT(i, NumberOfReferences());
   GetReferences()[i].Assign(object);
 }
 
@@ -95,13 +101,13 @@
   // A HandleScope should always contain something. One created by the
   // jni_compiler should have a jobject/jclass as a native method is
   // passed in a this pointer or a class
-  DCHECK_GT(number_of_references_, 0U);
+  DCHECK_GT(NumberOfReferences(), 0U);
   return &GetReferences()[0] <= handle_scope_entry &&
       handle_scope_entry <= &GetReferences()[number_of_references_ - 1];
 }
 
 template<size_t kNumReferences> template<class T>
-inline MutableHandle<T> StackHandleScope<kNumReferences>::NewHandle(T* object) {
+inline MutableHandle<T> FixedSizeHandleScope<kNumReferences>::NewHandle(T* object) {
   SetReference(pos_, object);
   MutableHandle<T> h(GetHandle<T>(pos_));
   pos_++;
@@ -109,24 +115,24 @@
 }
 
 template<size_t kNumReferences> template<class MirrorType, bool kPoison>
-inline MutableHandle<MirrorType> StackHandleScope<kNumReferences>::NewHandle(
+inline MutableHandle<MirrorType> FixedSizeHandleScope<kNumReferences>::NewHandle(
     ObjPtr<MirrorType, kPoison> object) {
   return NewHandle(object.Ptr());
 }
 
 template<size_t kNumReferences> template<class T>
-inline HandleWrapper<T> StackHandleScope<kNumReferences>::NewHandleWrapper(T** object) {
+inline HandleWrapper<T> FixedSizeHandleScope<kNumReferences>::NewHandleWrapper(T** object) {
   return HandleWrapper<T>(object, NewHandle(*object));
 }
 
 template<size_t kNumReferences> template<class T>
-inline HandleWrapperObjPtr<T> StackHandleScope<kNumReferences>::NewHandleWrapper(
+inline HandleWrapperObjPtr<T> FixedSizeHandleScope<kNumReferences>::NewHandleWrapper(
     ObjPtr<T>* object) {
   return HandleWrapperObjPtr<T>(object, NewHandle(*object));
 }
 
 template<size_t kNumReferences>
-inline void StackHandleScope<kNumReferences>::SetReference(size_t i, mirror::Object* object) {
+inline void FixedSizeHandleScope<kNumReferences>::SetReference(size_t i, mirror::Object* object) {
   if (kDebugLocking) {
     Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
   }
@@ -135,12 +141,111 @@
   GetReferences()[i].Assign(object);
 }
 
+// Number of references contained within this handle scope.
+inline uint32_t BaseHandleScope::NumberOfReferences() const {
+  return LIKELY(!IsVariableSized())
+      ? AsHandleScope()->NumberOfReferences()
+      : AsVariableSized()->NumberOfReferences();
+}
+
+inline bool BaseHandleScope::Contains(StackReference<mirror::Object>* handle_scope_entry) const {
+  return LIKELY(!IsVariableSized())
+      ? AsHandleScope()->Contains(handle_scope_entry)
+      : AsVariableSized()->Contains(handle_scope_entry);
+}
+
+template <typename Visitor>
+inline void BaseHandleScope::VisitRoots(Visitor& visitor) {
+  if (LIKELY(!IsVariableSized())) {
+    AsHandleScope()->VisitRoots(visitor);
+  } else {
+    AsVariableSized()->VisitRoots(visitor);
+  }
+}
+
+inline VariableSizedHandleScope* BaseHandleScope::AsVariableSized() {
+  DCHECK(IsVariableSized());
+  return down_cast<VariableSizedHandleScope*>(this);
+}
+
+inline HandleScope* BaseHandleScope::AsHandleScope() {
+  DCHECK(!IsVariableSized());
+  return down_cast<HandleScope*>(this);
+}
+
+inline const VariableSizedHandleScope* BaseHandleScope::AsVariableSized() const {
+  DCHECK(IsVariableSized());
+  return down_cast<const VariableSizedHandleScope*>(this);
+}
+
+inline const HandleScope* BaseHandleScope::AsHandleScope() const {
+  DCHECK(!IsVariableSized());
+  return down_cast<const HandleScope*>(this);
+}
+
+template<class T>
+MutableHandle<T> VariableSizedHandleScope::NewHandle(T* object) {
+  if (current_scope_->RemainingSlots() == 0) {
+    current_scope_ = new LocalScopeType(current_scope_);
+  }
+  return current_scope_->NewHandle(object);
+}
+
 template<class MirrorType, bool kPoison>
-inline MutableHandle<MirrorType> StackHandleScopeCollection::NewHandle(
+inline MutableHandle<MirrorType> VariableSizedHandleScope::NewHandle(
     ObjPtr<MirrorType, kPoison> ptr) {
   return NewHandle(ptr.Ptr());
 }
 
+inline VariableSizedHandleScope::VariableSizedHandleScope(Thread* const self)
+    : BaseHandleScope(self->GetTopHandleScope()),
+      self_(self) {
+  current_scope_ = new LocalScopeType(/*link*/ nullptr);
+  self_->PushHandleScope(this);
+}
+
+inline VariableSizedHandleScope::~VariableSizedHandleScope() {
+  BaseHandleScope* top_handle_scope = self_->PopHandleScope();
+  DCHECK_EQ(top_handle_scope, this);
+  while (current_scope_ != nullptr) {
+    LocalScopeType* next = reinterpret_cast<LocalScopeType*>(current_scope_->GetLink());
+    delete current_scope_;
+    current_scope_ = next;
+  }
+}
+
+inline uint32_t VariableSizedHandleScope::NumberOfReferences() const {
+  uint32_t sum = 0;
+  const LocalScopeType* cur = current_scope_;
+  while (cur != nullptr) {
+    sum += cur->NumberOfReferences();
+    cur = reinterpret_cast<const LocalScopeType*>(cur->GetLink());
+  }
+  return sum;
+}
+
+inline bool VariableSizedHandleScope::Contains(StackReference<mirror::Object>* handle_scope_entry)
+    const {
+  const LocalScopeType* cur = current_scope_;
+  while (cur != nullptr) {
+    if (cur->Contains(handle_scope_entry)) {
+      return true;
+    }
+    cur = reinterpret_cast<const LocalScopeType*>(cur->GetLink());
+  }
+  return false;
+}
+
+template <typename Visitor>
+inline void VariableSizedHandleScope::VisitRoots(Visitor& visitor) {
+  LocalScopeType* cur = current_scope_;
+  while (cur != nullptr) {
+    cur->VisitRoots(visitor);
+    cur = reinterpret_cast<LocalScopeType*>(cur->GetLink());
+  }
+}
+
 }  // namespace art
 
 #endif  // ART_RUNTIME_HANDLE_SCOPE_INL_H_
diff --git a/runtime/handle_scope.h b/runtime/handle_scope.h
index fc729a5..8a0aba6 100644
--- a/runtime/handle_scope.h
+++ b/runtime/handle_scope.h
@@ -29,26 +29,69 @@
 
 namespace art {
 
+class HandleScope;
 template<class MirrorType, bool kPoison> class ObjPtr;
+class Thread;
+class VariableSizedHandleScope;
 
 namespace mirror {
 class Object;
 }
 
-class Thread;
+// Basic handle scope, tracked by a list. May be variable sized.
+class PACKED(4) BaseHandleScope {
+ public:
+  bool IsVariableSized() const {
+    return number_of_references_ == kNumReferencesVariableSized;
+  }
+
+  // Number of references contained within this handle scope.
+  ALWAYS_INLINE uint32_t NumberOfReferences() const;
+
+  ALWAYS_INLINE bool Contains(StackReference<mirror::Object>* handle_scope_entry) const;
+
+  template <typename Visitor>
+  ALWAYS_INLINE void VisitRoots(Visitor& visitor) REQUIRES_SHARED(Locks::mutator_lock_);
+
+  // Link to previous BaseHandleScope or null.
+  BaseHandleScope* GetLink() const {
+    return link_;
+  }
+
+  ALWAYS_INLINE VariableSizedHandleScope* AsVariableSized();
+  ALWAYS_INLINE HandleScope* AsHandleScope();
+  ALWAYS_INLINE const VariableSizedHandleScope* AsVariableSized() const;
+  ALWAYS_INLINE const HandleScope* AsHandleScope() const;
+
+ protected:
+  BaseHandleScope(BaseHandleScope* link, uint32_t num_references)
+      : link_(link),
+        number_of_references_(num_references) {}
+
+  // Variable sized constructor.
+  explicit BaseHandleScope(BaseHandleScope* link)
+      : link_(link),
+        number_of_references_(kNumReferencesVariableSized) {}
+
+  static constexpr int32_t kNumReferencesVariableSized = -1;
+
+  // Linked list of handle scopes. The root is held by a Thread.
+  BaseHandleScope* const link_;
+
+  // Number of handlerized references. -1 for variable sized handle scopes.
+  const int32_t number_of_references_;
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(BaseHandleScope);
+};
 
 // HandleScopes are scoped objects containing a number of Handles. They are used to allocate
 // handles, for these handles (and the objects contained within them) to be visible/roots for the
 // GC. It is most common to stack allocate HandleScopes using StackHandleScope.
-class PACKED(4) HandleScope {
+class PACKED(4) HandleScope : public BaseHandleScope {
  public:
   ~HandleScope() {}
 
-  // Number of references contained within this handle scope.
-  uint32_t NumberOfReferences() const {
-    return number_of_references_;
-  }
-
   // We have versions with and without explicit pointer size of the following. The first two are
   // used at runtime, so OFFSETOF_MEMBER computes the right offsets automatically. The last one
   // takes the pointer size explicitly so that at compile time we can cross-compile correctly.
@@ -59,11 +102,6 @@
   // Returns the size of a HandleScope containing num_references handles.
   static size_t SizeOf(PointerSize pointer_size, uint32_t num_references);
 
-  // Link to previous HandleScope or null.
-  HandleScope* GetLink() const {
-    return link_;
-  }
-
   ALWAYS_INLINE mirror::Object* GetReference(size_t i) const
       REQUIRES_SHARED(Locks::mutator_lock_);
 
@@ -93,11 +131,26 @@
   }
 
   // Placement new creation.
-  static HandleScope* Create(void* storage, HandleScope* link, uint32_t num_references)
+  static HandleScope* Create(void* storage, BaseHandleScope* link, uint32_t num_references)
       WARN_UNUSED {
     return new (storage) HandleScope(link, num_references);
   }
 
+  // Number of references contained within this handle scope.
+  ALWAYS_INLINE uint32_t NumberOfReferences() const {
+    DCHECK_GE(number_of_references_, 0);
+    return static_cast<uint32_t>(number_of_references_);
+  }
+
+  template <typename Visitor>
+  void VisitRoots(Visitor& visitor) REQUIRES_SHARED(Locks::mutator_lock_) {
+    for (size_t i = 0, count = NumberOfReferences(); i < count; ++i) {
+      // GetReference returns a pointer to the stack reference within the handle scope. If this
+      // needs to be updated, it will be done by the root visitor.
+      visitor.VisitRootIfNonNull(GetHandle(i).GetReference());
+    }
+  }
+
  protected:
   // Return backing storage used for references.
   ALWAYS_INLINE StackReference<mirror::Object>* GetReferences() const {
@@ -105,20 +158,11 @@
     return reinterpret_cast<StackReference<mirror::Object>*>(address);
   }
 
-  explicit HandleScope(size_t number_of_references) :
-      link_(nullptr), number_of_references_(number_of_references) {
-  }
+  explicit HandleScope(size_t number_of_references) : HandleScope(nullptr, number_of_references) {}
 
   // Semi-hidden constructor. Construction expected by generated code and StackHandleScope.
-  HandleScope(HandleScope* link, uint32_t num_references) :
-      link_(link), number_of_references_(num_references) {
-  }
-
-  // Link-list of handle scopes. The root is held by a Thread.
-  HandleScope* const link_;
-
-  // Number of handlerized references.
-  const uint32_t number_of_references_;
+  HandleScope(BaseHandleScope* link, uint32_t num_references)
+      : BaseHandleScope(link, num_references) {}
 
   // Storage for references.
   // StackReference<mirror::Object> references_[number_of_references_]
@@ -165,14 +209,10 @@
   ObjPtr<T>* const obj_;
 };
 
-
-// Scoped handle storage of a fixed size that is usually stack allocated.
+// Fixed size handle scope that is not necessarily linked into the thread's handle scope list.
 template<size_t kNumReferences>
-class PACKED(4) StackHandleScope FINAL : public HandleScope {
+class PACKED(4) FixedSizeHandleScope : public HandleScope {
  public:
-  explicit ALWAYS_INLINE StackHandleScope(Thread* self, mirror::Object* fill_value = nullptr);
-  ALWAYS_INLINE ~StackHandleScope();
-
   template<class T>
   ALWAYS_INLINE MutableHandle<T> NewHandle(T* object) REQUIRES_SHARED(Locks::mutator_lock_);
 
@@ -191,11 +231,15 @@
   ALWAYS_INLINE void SetReference(size_t i, mirror::Object* object)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
-  Thread* Self() const {
-    return self_;
+  size_t RemainingSlots() const {
+    return kNumReferences - pos_;
   }
 
  private:
+  explicit ALWAYS_INLINE FixedSizeHandleScope(BaseHandleScope* link,
+                                              mirror::Object* fill_value = nullptr);
+  ALWAYS_INLINE ~FixedSizeHandleScope() {}
+
   template<class T>
   ALWAYS_INLINE MutableHandle<T> GetHandle(size_t i) REQUIRES_SHARED(Locks::mutator_lock_) {
     DCHECK_LT(i, kNumReferences);
@@ -205,66 +249,65 @@
   // Reference storage needs to be first as expected by the HandleScope layout.
   StackReference<mirror::Object> storage_[kNumReferences];
 
+  // Position at which new handles will be created.
+  size_t pos_ = 0;
+
+  template<size_t kNumRefs> friend class StackHandleScope;
+  friend class VariableSizedHandleScope;
+};
+
+// Scoped handle storage of a fixed size that is stack allocated.
+template<size_t kNumReferences>
+class PACKED(4) StackHandleScope FINAL : public FixedSizeHandleScope<kNumReferences> {
+ public:
+  explicit ALWAYS_INLINE StackHandleScope(Thread* self, mirror::Object* fill_value = nullptr);
+  ALWAYS_INLINE ~StackHandleScope();
+
+  Thread* Self() const {
+    return self_;
+  }
+
+ private:
   // The thread that the stack handle scope is a linked list upon. The stack handle scope will
   // push and pop itself from this thread.
   Thread* const self_;
-
-  // Position new handles will be created.
-  size_t pos_;
-
-  template<size_t kNumRefs> friend class StackHandleScope;
 };
 
-// Utility class to manage a collection (stack) of StackHandleScope. All the managed
-// scope handle have the same fixed sized.
-// Calls to NewHandle will create a new handle inside the top StackHandleScope.
-// When the handle scope becomes full a new one is created and push on top of the
-// previous.
-//
-// NB:
-// - it is not safe to use the *same* StackHandleScopeCollection intermix with
-// other StackHandleScopes.
-// - this is a an easy way around implementing a full ZoneHandleScope to manage an
-// arbitrary number of handles.
-class StackHandleScopeCollection {
+// Utility class that manages a variable sized handle scope backed by a list of fixed size
+// handle scopes.
+// Calls to NewHandle will create a new handle inside the current FixedSizeHandleScope.
+// When the current handle scope becomes full, a new one is created and put at the front of the
+// list.
+class VariableSizedHandleScope : public BaseHandleScope {
  public:
-  explicit StackHandleScopeCollection(Thread* const self) :
-      self_(self),
-      current_scope_num_refs_(0) {
-  }
-
-  ~StackHandleScopeCollection() {
-    while (!scopes_.empty()) {
-      delete scopes_.top();
-      scopes_.pop();
-    }
-  }
+  explicit VariableSizedHandleScope(Thread* const self);
+  ~VariableSizedHandleScope();
 
   template<class T>
-  MutableHandle<T> NewHandle(T* object) REQUIRES_SHARED(Locks::mutator_lock_) {
-    if (scopes_.empty() || current_scope_num_refs_ >= kNumReferencesPerScope) {
-      StackHandleScope<kNumReferencesPerScope>* scope =
-          new StackHandleScope<kNumReferencesPerScope>(self_);
-      scopes_.push(scope);
-      current_scope_num_refs_ = 0;
-    }
-    current_scope_num_refs_++;
-    return scopes_.top()->NewHandle(object);
-  }
+  MutableHandle<T> NewHandle(T* object) REQUIRES_SHARED(Locks::mutator_lock_);
 
   template<class MirrorType, bool kPoison>
   MutableHandle<MirrorType> NewHandle(ObjPtr<MirrorType, kPoison> ptr)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
+  // Number of references contained within this handle scope.
+  ALWAYS_INLINE uint32_t NumberOfReferences() const;
+
+  ALWAYS_INLINE bool Contains(StackReference<mirror::Object>* handle_scope_entry) const;
+
+  template <typename Visitor>
+  void VisitRoots(Visitor& visitor) REQUIRES_SHARED(Locks::mutator_lock_);
+
  private:
   static constexpr size_t kNumReferencesPerScope = 4;
 
   Thread* const self_;
 
-  std::stack<StackHandleScope<kNumReferencesPerScope>*> scopes_;
-  size_t current_scope_num_refs_;
+  // Linked list of fixed size handle scopes.
+  using LocalScopeType = FixedSizeHandleScope<kNumReferencesPerScope>;
+  LocalScopeType* current_scope_;
 
-  DISALLOW_COPY_AND_ASSIGN(StackHandleScopeCollection);
+  DISALLOW_COPY_AND_ASSIGN(VariableSizedHandleScope);
 };
 
 }  // namespace art
diff --git a/runtime/handle_scope_test.cc b/runtime/handle_scope_test.cc
index c269a37..92063c4 100644
--- a/runtime/handle_scope_test.cc
+++ b/runtime/handle_scope_test.cc
@@ -15,6 +15,7 @@
  */
 
 #include "base/enums.h"
+#include "common_runtime_test.h"
 #include "gtest/gtest.h"
 #include "handle_scope-inl.h"
 #include "scoped_thread_state_change-inl.h"
@@ -22,51 +23,85 @@
 
 namespace art {
 
-// Handle scope with a fixed size which is allocated on the stack.
-template<size_t kNumReferences>
-class NoThreadStackHandleScope : public HandleScope {
- public:
-  explicit NoThreadStackHandleScope(HandleScope* link) : HandleScope(link, kNumReferences) {
-  }
-  ~NoThreadStackHandleScope() {
-  }
-
- private:
-  // references_storage_ needs to be first so that it matches the address of references_
-  StackReference<mirror::Object> references_storage_[kNumReferences];
-};
+class HandleScopeTest : public CommonRuntimeTest {};
 
 // Test the offsets computed for members of HandleScope. Because of cross-compiling
 // it is impossible the use OFFSETOF_MEMBER, so we do some reasonable computations ourselves. This
 // test checks whether we do the right thing.
-TEST(HandleScopeTest, Offsets) NO_THREAD_SAFETY_ANALYSIS {
+TEST_F(HandleScopeTest, Offsets) {
+  ScopedObjectAccess soa(Thread::Current());
+  ClassLinker* const class_linker = Runtime::Current()->GetClassLinker();
   // As the members of HandleScope are private, we cannot use OFFSETOF_MEMBER
   // here. So do the inverse: set some data, and access it through pointers created from the offsets.
-  NoThreadStackHandleScope<0x9ABC> test_table(reinterpret_cast<HandleScope*>(0x5678));
-  test_table.SetReference(0, reinterpret_cast<mirror::Object*>(0x1234));
+  StackHandleScope<0x1> hs0(soa.Self());
+  static const size_t kNumReferences = 0x9ABC;
+  StackHandleScope<kNumReferences> test_table(soa.Self());
+  ObjPtr<mirror::Class> c = class_linker->FindSystemClass(soa.Self(), "Ljava/lang/Object;");
+  test_table.SetReference(0, c.Ptr());
 
   uint8_t* table_base_ptr = reinterpret_cast<uint8_t*>(&test_table);
 
   {
-    uintptr_t* link_ptr = reinterpret_cast<uintptr_t*>(table_base_ptr +
+    BaseHandleScope** link_ptr = reinterpret_cast<BaseHandleScope**>(table_base_ptr +
         HandleScope::LinkOffset(kRuntimePointerSize));
-    EXPECT_EQ(*link_ptr, static_cast<size_t>(0x5678));
+    EXPECT_EQ(*link_ptr, &hs0);
   }
 
   {
     uint32_t* num_ptr = reinterpret_cast<uint32_t*>(table_base_ptr +
         HandleScope::NumberOfReferencesOffset(kRuntimePointerSize));
-    EXPECT_EQ(*num_ptr, static_cast<size_t>(0x9ABC));
+    EXPECT_EQ(*num_ptr, static_cast<size_t>(kNumReferences));
   }
 
   {
-    // Assume sizeof(StackReference<mirror::Object>) == sizeof(uint32_t)
-    // TODO: How can we make this assumption-less but still access directly and fully?
-    EXPECT_EQ(sizeof(StackReference<mirror::Object>), sizeof(uint32_t));
-
-    uint32_t* ref_ptr = reinterpret_cast<uint32_t*>(table_base_ptr +
+    auto* ref_ptr = reinterpret_cast<StackReference<mirror::Object>*>(table_base_ptr +
         HandleScope::ReferencesOffset(kRuntimePointerSize));
-    EXPECT_EQ(*ref_ptr, static_cast<uint32_t>(0x1234));
+    EXPECT_OBJ_PTR_EQ(ref_ptr->AsMirrorPtr(), c);
+  }
+}
+
+class CollectVisitor {
+ public:
+  void VisitRootIfNonNull(StackReference<mirror::Object>* ref) {
+    if (!ref->IsNull()) {
+      visited.insert(ref);
+    }
+    ++total_visited;
+  }
+
+  std::set<StackReference<mirror::Object>*> visited;
+  size_t total_visited = 0;  // including null.
+};
+
+// Test functionality of variable sized handle scopes.
+TEST_F(HandleScopeTest, VariableSized) {
+  ScopedObjectAccess soa(Thread::Current());
+  VariableSizedHandleScope hs(soa.Self());
+  ClassLinker* const class_linker = Runtime::Current()->GetClassLinker();
+  Handle<mirror::Class> c =
+      hs.NewHandle(class_linker->FindSystemClass(soa.Self(), "Ljava/lang/Object;"));
+  // Test nested scopes.
+  StackHandleScope<1> inner(soa.Self());
+  inner.NewHandle(c->AllocObject(soa.Self()));
+  // Add a bunch of handles and make sure callbacks work.
+  static const size_t kNumHandles = 100;
+  std::vector<Handle<mirror::Object>> handles;
+  for (size_t i = 0; i < kNumHandles; ++i) {
+    BaseHandleScope* base = &hs;
+    ObjPtr<mirror::Object> o = c->AllocObject(soa.Self());
+    handles.push_back(hs.NewHandle(o));
+    EXPECT_OBJ_PTR_EQ(o, handles.back().Get());
+    EXPECT_TRUE(hs.Contains(handles.back().GetReference()));
+    EXPECT_TRUE(base->Contains(handles.back().GetReference()));
+    EXPECT_EQ(hs.NumberOfReferences(), base->NumberOfReferences());
+  }
+  CollectVisitor visitor;
+  BaseHandleScope* base = &hs;
+  base->VisitRoots(visitor);
+  EXPECT_LE(visitor.visited.size(), base->NumberOfReferences());
+  EXPECT_EQ(visitor.total_visited, base->NumberOfReferences());
+  for (StackReference<mirror::Object>* ref : visitor.visited) {
+    EXPECT_TRUE(base->Contains(ref));
   }
 }
 
diff --git a/runtime/native/dalvik_system_VMDebug.cc b/runtime/native/dalvik_system_VMDebug.cc
index 31ce4c1..1852956 100644
--- a/runtime/native/dalvik_system_VMDebug.cc
+++ b/runtime/native/dalvik_system_VMDebug.cc
@@ -265,7 +265,7 @@
   if (c == nullptr) {
     return 0;
   }
-  StackHandleScopeCollection hs(soa.Self());
+  VariableSizedHandleScope hs(soa.Self());
   std::vector<Handle<mirror::Class>> classes {hs.NewHandle(c)};
   uint64_t count = 0;
   heap->CountInstances(classes, countAssignable, &count);
@@ -284,7 +284,7 @@
   if (decoded_classes == nullptr) {
     return nullptr;
   }
-  StackHandleScopeCollection hs(soa.Self());
+  VariableSizedHandleScope hs(soa.Self());
   std::vector<Handle<mirror::Class>> classes;
   for (size_t i = 0, count = decoded_classes->GetLength(); i < count; ++i) {
     classes.push_back(hs.NewHandle(decoded_classes->Get(i)));
diff --git a/runtime/thread.cc b/runtime/thread.cc
index 7f88035..45d3e34 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -1826,7 +1826,7 @@
 
 size_t Thread::NumHandleReferences() {
   size_t count = 0;
-  for (HandleScope* cur = tlsPtr_.top_handle_scope; cur != nullptr; cur = cur->GetLink()) {
+  for (BaseHandleScope* cur = tlsPtr_.top_handle_scope; cur != nullptr; cur = cur->GetLink()) {
     count += cur->NumberOfReferences();
   }
   return count;
@@ -1835,7 +1835,7 @@
 bool Thread::HandleScopeContains(jobject obj) const {
   StackReference<mirror::Object>* hs_entry =
       reinterpret_cast<StackReference<mirror::Object>*>(obj);
-  for (HandleScope* cur = tlsPtr_.top_handle_scope; cur!= nullptr; cur = cur->GetLink()) {
+  for (BaseHandleScope* cur = tlsPtr_.top_handle_scope; cur != nullptr; cur = cur->GetLink()) {
     if (cur->Contains(hs_entry)) {
       return true;
     }
@@ -1847,12 +1847,8 @@
 void Thread::HandleScopeVisitRoots(RootVisitor* visitor, uint32_t thread_id) {
   BufferedRootVisitor<kDefaultBufferedRootCount> buffered_visitor(
       visitor, RootInfo(kRootNativeStack, thread_id));
-  for (HandleScope* cur = tlsPtr_.top_handle_scope; cur; cur = cur->GetLink()) {
-    for (size_t j = 0, count = cur->NumberOfReferences(); j < count; ++j) {
-      // GetReference returns a pointer to the stack reference within the handle scope. If this
-      // needs to be updated, it will be done by the root visitor.
-      buffered_visitor.VisitRootIfNonNull(cur->GetHandle(j).GetReference());
-    }
+  for (BaseHandleScope* cur = tlsPtr_.top_handle_scope; cur; cur = cur->GetLink()) {
+    cur->VisitRoots(buffered_visitor);
   }
 }
 
diff --git a/runtime/thread.h b/runtime/thread.h
index 20b4cc1..376a69c 100644
--- a/runtime/thread.h
+++ b/runtime/thread.h
@@ -799,17 +799,17 @@
   void HandleScopeVisitRoots(RootVisitor* visitor, uint32_t thread_id)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
-  HandleScope* GetTopHandleScope() {
+  BaseHandleScope* GetTopHandleScope() {
     return tlsPtr_.top_handle_scope;
   }
 
-  void PushHandleScope(HandleScope* handle_scope) {
+  void PushHandleScope(BaseHandleScope* handle_scope) {
     DCHECK_EQ(handle_scope->GetLink(), tlsPtr_.top_handle_scope);
     tlsPtr_.top_handle_scope = handle_scope;
   }
 
-  HandleScope* PopHandleScope() {
-    HandleScope* handle_scope = tlsPtr_.top_handle_scope;
+  BaseHandleScope* PopHandleScope() {
+    BaseHandleScope* handle_scope = tlsPtr_.top_handle_scope;
     DCHECK(handle_scope != nullptr);
     tlsPtr_.top_handle_scope = tlsPtr_.top_handle_scope->GetLink();
     return handle_scope;
@@ -1446,7 +1446,7 @@
     mirror::Object* monitor_enter_object;
 
     // Top of linked list of handle scopes or null for none.
-    HandleScope* top_handle_scope;
+    BaseHandleScope* top_handle_scope;
 
     // Needed to get the right ClassLoader in JNI_OnLoad, but also
     // useful for testing.