Add Handle/HandleScope and delete SirtRef.

Deleted SirtRef and replaced it with Handle. Handles are value types
that wrap a StackReference*.
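
Roughly, a Handle is just a pointer to a StackReference slot owned by a
HandleScope on the thread's stack. A simplified sketch (member names are
taken from the new call sites below; this is not the exact class body):

  template<class T>
  class Handle {
   public:
    explicit Handle(StackReference<T>* reference) : reference_(reference) {}
    T* Get() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
      return reference_->AsMirrorPtr();
    }
    T* operator->() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
      return Get();
    }
   private:
    StackReference<T>* reference_;  // slot inside the enclosing HandleScope
  };

Typical call sites change from SirtRef to:

  StackHandleScope<1> hs(self);
  Handle<mirror::Class> h_klass(hs.NewHandle(klass));
  if (!class_linker->EnsureInitialized(h_klass, true, true)) {
    return nullptr;  // exception pending
  }
  return h_klass.Get();  // was sirt_klass.get()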

Renamed StackIndirectReferenceTable to HandleScope.

Added a scoped handle wrapper that wraps an Object** and restores the
pointer in its destructor.
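
Schematically (the class name below is illustrative; only the behavior
described above is part of this change):

  template<class T>
  class ScopedHandleWrapper : public Handle<T> {
   public:
    ScopedHandleWrapper(T** obj, const Handle<T>& handle)
        : Handle<T>(handle), obj_(obj) {}
    // Write the current (possibly moved) object back through the raw
    // pointer when the scope ends.
    ~ScopedHandleWrapper() { *obj_ = Handle<T>::Get(); }
   private:
    T** obj_;  // raw pointer to restore on destruction
  };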

Renamed Handle::get -> Get.

Bug: 8473721

Change-Id: Idbfebd4f35af629f0f43931b7c5184b334822c7a
diff --git a/runtime/entrypoints/entrypoint_utils.h b/runtime/entrypoints/entrypoint_utils.h
index e52a8fb..6998e21 100644
--- a/runtime/entrypoints/entrypoint_utils.h
+++ b/runtime/entrypoints/entrypoint_utils.h
@@ -30,7 +30,7 @@
 #include "mirror/object-inl.h"
 #include "mirror/throwable.h"
 #include "object_utils.h"
-#include "sirt_ref-inl.h"
+#include "handle_scope-inl.h"
 #include "thread.h"
 
 namespace art {
@@ -72,7 +72,8 @@
     }
   }
   if (UNLIKELY(!klass->IsInitialized())) {
-    SirtRef<mirror::Class> sirt_klass(self, klass);
+    StackHandleScope<1> hs(self);
+    Handle<mirror::Class> h_klass(hs.NewHandle(klass));
     // EnsureInitialized (the class initializer) might cause a GC.
     // may cause us to suspend meaning that another thread may try to
     // change the allocator while we are stuck in the entrypoints of
@@ -82,11 +83,11 @@
     // has changed and to null-check the return value in case the
     // initialization fails.
     *slow_path = true;
-    if (!Runtime::Current()->GetClassLinker()->EnsureInitialized(sirt_klass, true, true)) {
+    if (!Runtime::Current()->GetClassLinker()->EnsureInitialized(h_klass, true, true)) {
       DCHECK(self->IsExceptionPending());
       return nullptr;  // Failure
     }
-    return sirt_klass.get();
+    return h_klass.Get();
   }
   return klass;
 }
@@ -96,7 +97,8 @@
                                                                                Thread* self, bool* slow_path)
     NO_THREAD_SAFETY_ANALYSIS {
   if (UNLIKELY(!klass->IsInitialized())) {
-    SirtRef<mirror::Class> sirt_class(self, klass);
+    StackHandleScope<1> hs(self);
+    Handle<mirror::Class> h_class(hs.NewHandle(klass));
     // EnsureInitialized (the class initializer) might cause a GC.
     // may cause us to suspend meaning that another thread may try to
     // change the allocator while we are stuck in the entrypoints of
@@ -106,11 +108,11 @@
     // has changed and to null-check the return value in case the
     // initialization fails.
     *slow_path = true;
-    if (!Runtime::Current()->GetClassLinker()->EnsureInitialized(sirt_class, true, true)) {
+    if (!Runtime::Current()->GetClassLinker()->EnsureInitialized(h_class, true, true)) {
       DCHECK(self->IsExceptionPending());
       return nullptr;  // Failure
     }
-    return sirt_class.get();
+    return h_class.Get();
   }
   return klass;
 }
@@ -346,14 +348,14 @@
     if (LIKELY(fields_class->IsInitialized())) {
       return resolved_field;
     } else {
-      SirtRef<mirror::Class> sirt_class(self, fields_class);
-      if (LIKELY(class_linker->EnsureInitialized(sirt_class, true, true))) {
+      StackHandleScope<1> hs(self);
+      Handle<mirror::Class> h_class(hs.NewHandle(fields_class));
+      if (LIKELY(class_linker->EnsureInitialized(h_class, true, true))) {
         // Otherwise let's ensure the class is initialized before resolving the field.
         return resolved_field;
-      } else {
-        DCHECK(self->IsExceptionPending());  // Throw exception and unwind
-        return nullptr;  // Failure.
       }
+      DCHECK(self->IsExceptionPending());  // Throw exception and unwind
+      return nullptr;  // Failure.
     }
   }
 }
@@ -386,12 +388,13 @@
                                                     mirror::Object* this_object,
                                                     mirror::ArtMethod* referrer, Thread* self) {
   ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
-  SirtRef<mirror::Object> sirt_this(self, type == kStatic ? nullptr : this_object);
+  StackHandleScope<1> hs(self);
+  Handle<mirror::Object> handle_scope_this(hs.NewHandle(type == kStatic ? nullptr : this_object));
   mirror::ArtMethod* resolved_method = class_linker->ResolveMethod(method_idx, referrer, type);
   if (UNLIKELY(resolved_method == nullptr)) {
     DCHECK(self->IsExceptionPending());  // Throw exception and unwind.
     return nullptr;  // Failure.
-  } else if (UNLIKELY(sirt_this.get() == nullptr && type != kStatic)) {
+  } else if (UNLIKELY(handle_scope_this.Get() == nullptr && type != kStatic)) {
     // Maintain interpreter-like semantics where NullPointerException is thrown
     // after potential NoSuchMethodError from class linker.
     ThrowLocation throw_location = self->GetCurrentLocationForThrow();
@@ -420,7 +423,7 @@
     case kDirect:
       return resolved_method;
     case kVirtual: {
-      mirror::ObjectArray<mirror::ArtMethod>* vtable = sirt_this->GetClass()->GetVTable();
+      mirror::ObjectArray<mirror::ArtMethod>* vtable = handle_scope_this->GetClass()->GetVTable();
       uint16_t vtable_index = resolved_method->GetMethodIndex();
       if (access_check &&
           (vtable == nullptr || vtable_index >= static_cast<uint32_t>(vtable->GetLength()))) {
@@ -457,16 +460,16 @@
     }
     case kInterface: {
       uint32_t imt_index = resolved_method->GetDexMethodIndex() % ClassLinker::kImtSize;
-      mirror::ObjectArray<mirror::ArtMethod>* imt_table = sirt_this->GetClass()->GetImTable();
+      mirror::ObjectArray<mirror::ArtMethod>* imt_table = handle_scope_this->GetClass()->GetImTable();
       mirror::ArtMethod* imt_method = imt_table->Get(imt_index);
       if (!imt_method->IsImtConflictMethod()) {
         return imt_method;
       } else {
         mirror::ArtMethod* interface_method =
-            sirt_this->GetClass()->FindVirtualMethodForInterface(resolved_method);
+            handle_scope_this->GetClass()->FindVirtualMethodForInterface(resolved_method);
         if (UNLIKELY(interface_method == nullptr)) {
           ThrowIncompatibleClassChangeErrorClassForInterfaceDispatch(resolved_method,
-                                                                     sirt_this.get(), referrer);
+                                                                     handle_scope_this.Get(), referrer);
           return nullptr;  // Failure.
         } else {
           return interface_method;
@@ -625,12 +628,13 @@
   if (klass == referring_class && referrer->IsConstructor() && referrer->IsStatic()) {
     return klass;
   }
-  SirtRef<mirror::Class> sirt_class(self, klass);
-  if (!class_linker->EnsureInitialized(sirt_class, true, true)) {
+  StackHandleScope<1> hs(self);
+  Handle<mirror::Class> h_class(hs.NewHandle(klass));
+  if (!class_linker->EnsureInitialized(h_class, true, true)) {
     CHECK(self->IsExceptionPending());
     return nullptr;  // Failure - Indicate to caller to deliver exception
   }
-  return sirt_class.get();
+  return h_class.Get();
 }
 
 extern void ThrowStackOverflowError(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
diff --git a/runtime/entrypoints/interpreter/interpreter_entrypoints.cc b/runtime/entrypoints/interpreter/interpreter_entrypoints.cc
index a0ba6b9..3f02ec7 100644
--- a/runtime/entrypoints/interpreter/interpreter_entrypoints.cc
+++ b/runtime/entrypoints/interpreter/interpreter_entrypoints.cc
@@ -34,14 +34,15 @@
     mirror::Class* declaringClass = method->GetDeclaringClass();
     if (UNLIKELY(!declaringClass->IsInitializing())) {
       self->PushShadowFrame(shadow_frame);
-      SirtRef<mirror::Class> sirt_c(self, declaringClass);
-      if (UNLIKELY(!Runtime::Current()->GetClassLinker()->EnsureInitialized(sirt_c, true, true))) {
+      StackHandleScope<1> hs(self);
+      Handle<mirror::Class> h_class(hs.NewHandle(declaringClass));
+      if (UNLIKELY(!Runtime::Current()->GetClassLinker()->EnsureInitialized(h_class, true, true))) {
         self->PopShadowFrame();
         DCHECK(self->IsExceptionPending());
         return;
       }
       self->PopShadowFrame();
-      CHECK(sirt_c->IsInitializing());
+      CHECK(h_class->IsInitializing());
     }
   }
   uint16_t arg_offset = (code_item == NULL) ? 0 : code_item->registers_size_ - code_item->ins_size_;
diff --git a/runtime/entrypoints/portable/portable_trampoline_entrypoints.cc b/runtime/entrypoints/portable/portable_trampoline_entrypoints.cc
index f1b15b5..17c3222 100644
--- a/runtime/entrypoints/portable/portable_trampoline_entrypoints.cc
+++ b/runtime/entrypoints/portable/portable_trampoline_entrypoints.cc
@@ -214,8 +214,9 @@
 
     if (method->IsStatic() && !method->GetDeclaringClass()->IsInitializing()) {
       // Ensure static method's class is initialized.
-      SirtRef<mirror::Class> sirt_c(self, method->GetDeclaringClass());
-      if (!Runtime::Current()->GetClassLinker()->EnsureInitialized(sirt_c, true, true)) {
+      StackHandleScope<1> hs(self);
+      Handle<mirror::Class> h_class(hs.NewHandle(method->GetDeclaringClass()));
+      if (!Runtime::Current()->GetClassLinker()->EnsureInitialized(h_class, true, true)) {
         DCHECK(Thread::Current()->IsExceptionPending());
         self->PopManagedStackFragment(fragment);
         return 0;
@@ -396,7 +397,8 @@
   const void* code = nullptr;
   if (LIKELY(!thread->IsExceptionPending())) {
     // Ensure that the called method's class is initialized.
-    SirtRef<mirror::Class> called_class(thread, called->GetDeclaringClass());
+    StackHandleScope<1> hs(Thread::Current());
+    Handle<mirror::Class> called_class(hs.NewHandle(called->GetDeclaringClass()));
     linker->EnsureInitialized(called_class, true, true);
     if (LIKELY(called_class->IsInitialized())) {
       code = called->GetEntryPointFromPortableCompiledCode();
diff --git a/runtime/entrypoints/quick/quick_jni_entrypoints.cc b/runtime/entrypoints/quick/quick_jni_entrypoints.cc
index 116957d..9c9cca8 100644
--- a/runtime/entrypoints/quick/quick_jni_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_jni_entrypoints.cc
@@ -65,7 +65,7 @@
   JNIEnvExt* env = self->GetJniEnv();
   env->locals.SetSegmentState(env->local_ref_cookie);
   env->local_ref_cookie = saved_local_ref_cookie;
-  self->PopSirt();
+  self->PopHandleScope();
 }
 
 extern void JniMethodEnd(uint32_t saved_local_ref_cookie, Thread* self) {
diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
index 5d2603f..887bd6f 100644
--- a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
@@ -489,8 +489,9 @@
 
     if (method->IsStatic() && !method->GetDeclaringClass()->IsInitializing()) {
       // Ensure static method's class is initialized.
-      SirtRef<mirror::Class> sirt_c(self, method->GetDeclaringClass());
-      if (!Runtime::Current()->GetClassLinker()->EnsureInitialized(sirt_c, true, true)) {
+      StackHandleScope<1> hs(self);
+      Handle<mirror::Class> h_class(hs.NewHandle(method->GetDeclaringClass()));
+      if (!Runtime::Current()->GetClassLinker()->EnsureInitialized(h_class, true, true)) {
         DCHECK(Thread::Current()->IsExceptionPending()) << PrettyMethod(method);
         self->PopManagedStackFragment(fragment);
         return 0;
@@ -755,9 +756,10 @@
   bool virtual_or_interface = invoke_type == kVirtual || invoke_type == kInterface;
   // Resolve method filling in dex cache.
   if (called->IsRuntimeMethod()) {
-    SirtRef<mirror::Object> sirt_receiver(soa.Self(), virtual_or_interface ? receiver : nullptr);
+    StackHandleScope<1> hs(self);
+    Handle<mirror::Object> handle_scope_receiver(hs.NewHandle(virtual_or_interface ? receiver : nullptr));
     called = linker->ResolveMethod(dex_method_idx, caller, invoke_type);
-    receiver = sirt_receiver.get();
+    receiver = handle_scope_receiver.Get();
   }
   const void* code = NULL;
   if (LIKELY(!self->IsExceptionPending())) {
@@ -796,7 +798,8 @@
       }
     }
     // Ensure that the called method's class is initialized.
-    SirtRef<mirror::Class> called_class(soa.Self(), called->GetDeclaringClass());
+    StackHandleScope<1> hs(soa.Self());
+    Handle<mirror::Class> called_class(hs.NewHandle(called->GetDeclaringClass()));
     linker->EnsureInitialized(called_class, true, true);
     if (LIKELY(called_class->IsInitialized())) {
       code = called->GetEntryPointFromQuickCompiledCode();
@@ -857,10 +860,10 @@
  *
  * void PushStack(uintptr_t): Push a value to the stack.
  *
- * uintptr_t PushSirt(mirror::Object* ref): Add a reference to the Sirt. This _will_ have nullptr,
+ * uintptr_t PushHandle(mirror::Object* ref): Add a reference to the HandleScope. This _will_ have nullptr,
  *                                          as this might be important for null initialization.
  *                                          Must return the jobject, that is, the reference to the
- *                                          entry in the Sirt (nullptr if necessary).
+ *                                          entry in the HandleScope (nullptr if necessary).
  *
  */
 template <class T> class BuildGenericJniFrameStateMachine {
@@ -956,18 +959,18 @@
   }
 
 
-  bool HaveSirtGpr() {
+  bool HaveHandleScopeGpr() {
     return gpr_index_ > 0;
   }
 
-  void AdvanceSirt(mirror::Object* ptr) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-    uintptr_t sirtRef = PushSirt(ptr);
-    if (HaveSirtGpr()) {
+  void AdvanceHandleScope(mirror::Object* ptr) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    uintptr_t handle = PushHandle(ptr);
+    if (HaveHandleScopeGpr()) {
       gpr_index_--;
-      PushGpr(sirtRef);
+      PushGpr(handle);
     } else {
       stack_entries_++;
-      PushStack(sirtRef);
+      PushStack(handle);
       gpr_index_ = 0;
     }
   }
@@ -1147,8 +1150,8 @@
   void PushStack(uintptr_t val) {
     delegate_->PushStack(val);
   }
-  uintptr_t PushSirt(mirror::Object* ref) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-    return delegate_->PushSirt(ref);
+  uintptr_t PushHandle(mirror::Object* ref) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    return delegate_->PushHandle(ref);
   }
 
   uint32_t gpr_index_;      // Number of free GPRs
@@ -1160,7 +1163,7 @@
 
 class ComputeGenericJniFrameSize FINAL {
  public:
-  ComputeGenericJniFrameSize() : num_sirt_references_(0), num_stack_entries_(0) {}
+  ComputeGenericJniFrameSize() : num_handle_scope_references_(0), num_stack_entries_(0) {}
 
   uint32_t GetStackSize() {
     return num_stack_entries_ * sizeof(uintptr_t);
@@ -1168,7 +1171,7 @@
 
   // WARNING: After this, *sp won't be pointing to the method anymore!
   void ComputeLayout(mirror::ArtMethod*** m, bool is_static, const char* shorty, uint32_t shorty_len,
-                     void* sp, StackIndirectReferenceTable** table, uint32_t* sirt_entries,
+                     void* sp, HandleScope** table, uint32_t* handle_scope_entries,
                      uintptr_t** start_stack, uintptr_t** start_gpr, uint32_t** start_fpr,
                      void** code_return, size_t* overall_size)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
@@ -1179,17 +1182,17 @@
     uint8_t* sp8 = reinterpret_cast<uint8_t*>(sp);
 
     // First, fix up the layout of the callee-save frame.
-    // We have to squeeze in the Sirt, and relocate the method pointer.
+    // We have to squeeze in the HandleScope, and relocate the method pointer.
 
     // "Free" the slot for the method.
     sp8 += kPointerSize;
 
-    // Add the Sirt.
-    *sirt_entries = num_sirt_references_;
-    size_t sirt_size = StackIndirectReferenceTable::GetAlignedSirtSize(num_sirt_references_);
-    sp8 -= sirt_size;
-    *table = reinterpret_cast<StackIndirectReferenceTable*>(sp8);
-    (*table)->SetNumberOfReferences(num_sirt_references_);
+    // Add the HandleScope.
+    *handle_scope_entries = num_handle_scope_references_;
+    size_t handle_scope_size = HandleScope::GetAlignedHandleScopeSize(num_handle_scope_references_);
+    sp8 -= handle_scope_size;
+    *table = reinterpret_cast<HandleScope*>(sp8);
+    (*table)->SetNumberOfReferences(num_handle_scope_references_);
 
     // Add a slot for the method pointer, and fill it. Fix the pointer-pointer given to us.
     sp8 -= kPointerSize;
@@ -1199,8 +1202,8 @@
 
     // Reference cookie and padding
     sp8 -= 8;
-    // Store Sirt size
-    *reinterpret_cast<uint32_t*>(sp8) = static_cast<uint32_t>(sirt_size & 0xFFFFFFFF);
+    // Store HandleScope size
+    *reinterpret_cast<uint32_t*>(sp8) = static_cast<uint32_t>(handle_scope_size & 0xFFFFFFFF);
 
     // Next comes the native call stack.
     sp8 -= GetStackSize();
@@ -1229,7 +1232,7 @@
     *(reinterpret_cast<uint8_t**>(sp8)) = method_pointer;
   }
 
-  void ComputeSirtOffset() { }  // nothing to do, static right now
+  void ComputeHandleScopeOffset() { }  // nothing to do, static right now
 
   void ComputeAll(bool is_static, const char* shorty, uint32_t shorty_len)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
@@ -1239,13 +1242,13 @@
     sm.AdvancePointer(nullptr);
 
     // Class object or this as first argument
-    sm.AdvanceSirt(reinterpret_cast<mirror::Object*>(0x12345678));
+    sm.AdvanceHandleScope(reinterpret_cast<mirror::Object*>(0x12345678));
 
     for (uint32_t i = 1; i < shorty_len; ++i) {
       Primitive::Type cur_type_ = Primitive::GetType(shorty[i]);
       switch (cur_type_) {
         case Primitive::kPrimNot:
-          sm.AdvanceSirt(reinterpret_cast<mirror::Object*>(0x12345678));
+          sm.AdvanceHandleScope(reinterpret_cast<mirror::Object*>(0x12345678));
           break;
 
         case Primitive::kPrimBoolean:
@@ -1288,13 +1291,13 @@
     // counting is already done in the superclass
   }
 
-  uintptr_t PushSirt(mirror::Object* /* ptr */) {
-    num_sirt_references_++;
+  uintptr_t PushHandle(mirror::Object* /* ptr */) {
+    num_handle_scope_references_++;
     return reinterpret_cast<uintptr_t>(nullptr);
   }
 
  private:
-  uint32_t num_sirt_references_;
+  uint32_t num_handle_scope_references_;
   uint32_t num_stack_entries_;
 };
 
@@ -1306,26 +1309,26 @@
                               uint32_t shorty_len, Thread* self) :
       QuickArgumentVisitor(*sp, is_static, shorty, shorty_len), sm_(this) {
     ComputeGenericJniFrameSize fsc;
-    fsc.ComputeLayout(sp, is_static, shorty, shorty_len, *sp, &sirt_, &sirt_expected_refs_,
+    fsc.ComputeLayout(sp, is_static, shorty, shorty_len, *sp, &handle_scope_, &handle_scope_expected_refs_,
                       &cur_stack_arg_, &cur_gpr_reg_, &cur_fpr_reg_, &code_return_,
                       &alloca_used_size_);
-    sirt_number_of_references_ = 0;
-    cur_sirt_entry_ = reinterpret_cast<StackReference<mirror::Object>*>(GetFirstSirtEntry());
+    handle_scope_number_of_references_ = 0;
+    cur_hs_entry_ = reinterpret_cast<StackReference<mirror::Object>*>(GetFirstHandleScopeEntry());
 
     // jni environment is always first argument
     sm_.AdvancePointer(self->GetJniEnv());
 
     if (is_static) {
-      sm_.AdvanceSirt((**sp)->GetDeclaringClass());
+      sm_.AdvanceHandleScope((**sp)->GetDeclaringClass());
     }
   }
 
   void Visit() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) OVERRIDE;
 
-  void FinalizeSirt(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void FinalizeHandleScope(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  jobject GetFirstSirtEntry() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-    return reinterpret_cast<jobject>(sirt_->GetStackReference(0));
+  jobject GetFirstHandleScopeEntry() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    return handle_scope_->GetHandle(0).ToJObject();
   }
 
   void PushGpr(uintptr_t val) {
@@ -1349,17 +1352,17 @@
     cur_stack_arg_++;
   }
 
-  uintptr_t PushSirt(mirror::Object* ref) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  uintptr_t PushHandle(mirror::Object* ref) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     uintptr_t tmp;
     if (ref == nullptr) {
-      *cur_sirt_entry_ = StackReference<mirror::Object>();
+      *cur_hs_entry_ = StackReference<mirror::Object>();
       tmp = reinterpret_cast<uintptr_t>(nullptr);
     } else {
-      *cur_sirt_entry_ = StackReference<mirror::Object>::FromMirrorPtr(ref);
-      tmp = reinterpret_cast<uintptr_t>(cur_sirt_entry_);
+      *cur_hs_entry_ = StackReference<mirror::Object>::FromMirrorPtr(ref);
+      tmp = reinterpret_cast<uintptr_t>(cur_hs_entry_);
     }
-    cur_sirt_entry_++;
-    sirt_number_of_references_++;
+    cur_hs_entry_++;
+    handle_scope_number_of_references_++;
     return tmp;
   }
 
@@ -1373,14 +1376,14 @@
   }
 
  private:
-  uint32_t sirt_number_of_references_;
-  StackReference<mirror::Object>* cur_sirt_entry_;
-  StackIndirectReferenceTable* sirt_;
-  uint32_t sirt_expected_refs_;
+  uint32_t handle_scope_number_of_references_;
+  StackReference<mirror::Object>* cur_hs_entry_;
+  HandleScope* handle_scope_;
+  uint32_t handle_scope_expected_refs_;
   uintptr_t* cur_gpr_reg_;
   uint32_t* cur_fpr_reg_;
   uintptr_t* cur_stack_arg_;
-  // StackReference<mirror::Object>* top_of_sirt_;
+  // StackReference<mirror::Object>* top_of_handle_scope_;
   void* code_return_;
   size_t alloca_used_size_;
 
@@ -1416,7 +1419,7 @@
     case Primitive::kPrimNot: {
       StackReference<mirror::Object>* stack_ref =
           reinterpret_cast<StackReference<mirror::Object>*>(GetParamAddress());
-      sm_.AdvanceSirt(stack_ref->AsMirrorPtr());
+      sm_.AdvanceHandleScope(stack_ref->AsMirrorPtr());
       break;
     }
     case Primitive::kPrimFloat:
@@ -1435,17 +1438,17 @@
   }
 }
 
-void BuildGenericJniFrameVisitor::FinalizeSirt(Thread* self) {
+void BuildGenericJniFrameVisitor::FinalizeHandleScope(Thread* self) {
   // Initialize padding entries.
-  while (sirt_number_of_references_ < sirt_expected_refs_) {
-    *cur_sirt_entry_ = StackReference<mirror::Object>();
-    cur_sirt_entry_++;
-    sirt_number_of_references_++;
+  while (handle_scope_number_of_references_ < handle_scope_expected_refs_) {
+    *cur_hs_entry_ = StackReference<mirror::Object>();
+    cur_hs_entry_++;
+    handle_scope_number_of_references_++;
   }
-  sirt_->SetNumberOfReferences(sirt_expected_refs_);
-  DCHECK_NE(sirt_expected_refs_, 0U);
-  // Install Sirt.
-  self->PushSirt(sirt_);
+  handle_scope_->SetNumberOfReferences(handle_scope_expected_refs_);
+  DCHECK_NE(handle_scope_expected_refs_, 0U);
+  // Install HandleScope.
+  self->PushHandleScope(handle_scope_);
 }
 
 extern "C" void* artFindNativeMethod();
@@ -1468,11 +1471,11 @@
 
 /*
  * Initializes an alloca region assumed to be directly below sp for a native call:
- * Create a Sirt and call stack and fill a mini stack with values to be pushed to registers.
+ * Create a HandleScope and call stack and fill a mini stack with values to be pushed to registers.
  * The final element on the stack is a pointer to the native code.
  *
  * On entry, the stack has a standard callee-save frame above sp, and an alloca below it.
- * We need to fix this, as the Sirt needs to go into the callee-save frame.
+ * We need to fix this, as the handle scope needs to go into the callee-save frame.
  *
  * The return of this function denotes:
  * 1) How many bytes of the alloca can be released, if the value is non-negative.
@@ -1489,7 +1492,7 @@
   BuildGenericJniFrameVisitor visitor(&sp, called->IsStatic(), mh.GetShorty(), mh.GetShortyLength(),
                                       self);
   visitor.VisitArguments();
-  visitor.FinalizeSirt(self);
+  visitor.FinalizeHandleScope(self);
 
   // fix up managed-stack things in Thread
   self->SetTopOfStack(sp, 0);
@@ -1499,9 +1502,9 @@
   // Start JNI, save the cookie.
   uint32_t cookie;
   if (called->IsSynchronized()) {
-    cookie = JniMethodStartSynchronized(visitor.GetFirstSirtEntry(), self);
+    cookie = JniMethodStartSynchronized(visitor.GetFirstHandleScopeEntry(), self);
     if (self->IsExceptionPending()) {
-      self->PopSirt();
+      self->PopHandleScope();
       // A negative value denotes an error.
       return -1;
     }
@@ -1527,7 +1530,7 @@
       DCHECK(self->IsExceptionPending());    // There should be an exception pending now.
 
       // End JNI, as the assembly will move to deliver the exception.
-      jobject lock = called->IsSynchronized() ? visitor.GetFirstSirtEntry() : nullptr;
+      jobject lock = called->IsSynchronized() ? visitor.GetFirstHandleScopeEntry() : nullptr;
       if (mh.GetShorty()[0] == 'L') {
         artQuickGenericJniEndJNIRef(self, cookie, nullptr, lock);
       } else {
@@ -1549,7 +1552,7 @@
 }
 
 /*
- * Is called after the native JNI code. Responsible for cleanup (SIRT, saved state) and
+ * Is called after the native JNI code. Responsible for cleanup (handle scope, saved state) and
  * unlocking.
  */
 extern "C" uint64_t artQuickGenericJniEndTrampoline(Thread* self, mirror::ArtMethod** sp,
@@ -1561,10 +1564,9 @@
 
   jobject lock = nullptr;
   if (called->IsSynchronized()) {
-    StackIndirectReferenceTable* table =
-        reinterpret_cast<StackIndirectReferenceTable*>(
-            reinterpret_cast<uint8_t*>(sp) + kPointerSize);
-    lock = reinterpret_cast<jobject>(table->GetStackReference(0));
+    HandleScope* table = reinterpret_cast<HandleScope*>(
+        reinterpret_cast<uint8_t*>(sp) + kPointerSize);
+    lock = table->GetHandle(0).ToJObject();
   }
 
   MethodHelper mh(called);