Small optimization for recursive calls: avoid dex cache.
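
For an invoke that targets the method being compiled (invoke->IsRecursive()),
skip loading the resolved method out of the dex cache and calling through its
quick entry point; instead emit a direct bl to a label bound at the method's
frame entry.

A rough, self-contained sketch of what "recursive" means here (the
MethodReference type and IsRecursiveCall helper below are illustrative
assumptions, not the code that actually computes IsRecursive()):

    #include <cstdint>

    // Illustrative types only, not ART's own: a call site is recursive when
    // the resolved callee is the very method being compiled, i.e. the same
    // method index in the same dex file.
    struct MethodReference {
      const void* dex_file;       // dex file identity, compared by pointer
      uint32_t dex_method_index;  // method index within that dex file
    };

    static bool IsRecursiveCall(const MethodReference& caller,
                                const MethodReference& callee) {
      return caller.dex_file == callee.dex_file &&
             caller.dex_method_index == callee.dex_method_index;
    }

Such calls then compile down to a single bl back to frame_entry_label_, which
is bound at the top of GenerateFrameEntry().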

Change-Id: I044757a2f06e535cdc1480c4fc8182b89635baf6
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index cc7bf3c..0909424 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -442,6 +442,9 @@
 }
 
 void CodeGeneratorARM64::GenerateFrameEntry() {
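+  // Label the method's frame entry so that recursive calls can branch straight to it.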
+  __ Bind(&frame_entry_label_);
+
   bool do_overflow_check = FrameNeedsStackCheck(GetFrameSize(), kArm64) || !IsLeafMethod();
   if (do_overflow_check) {
     UseScratchRegisterScope temps(GetVIXLAssembler());
@@ -1845,17 +1848,22 @@
   //
   // Currently we implement the app -> app logic, which looks up in the resolve cache.
 
-  // temp = method;
-  LoadCurrentMethod(temp);
-  // temp = temp->dex_cache_resolved_methods_;
-  __ Ldr(temp, HeapOperand(temp, mirror::ArtMethod::DexCacheResolvedMethodsOffset()));
-  // temp = temp[index_in_cache];
-  __ Ldr(temp, HeapOperand(temp, index_in_cache));
-  // lr = temp->entry_point_from_quick_compiled_code_;
-  __ Ldr(lr, HeapOperand(temp, mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(
-      kArm64WordSize)));
-  // lr();
-  __ Blr(lr);
+  if (!invoke->IsRecursive()) {
+    // temp = method;
+    LoadCurrentMethod(temp);
+    // temp = temp->dex_cache_resolved_methods_;
+    __ Ldr(temp, HeapOperand(temp, mirror::ArtMethod::DexCacheResolvedMethodsOffset()));
+    // temp = temp[index_in_cache];
+    __ Ldr(temp, HeapOperand(temp, index_in_cache));
+    // lr = temp->entry_point_from_quick_compiled_code_;
+    __ Ldr(lr, HeapOperand(temp, mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(
+        kArm64WordSize)));
+    // lr();
+    __ Blr(lr);
+  } else {
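+    // Recursive call: branch directly to this method's frame entry.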
+    __ Bl(&frame_entry_label_);
+  }
 
   RecordPcInfo(invoke, invoke->GetDexPc());
   DCHECK(!IsLeafMethod());