Small optimization for recursive calls: avoid dex cache.
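
A static or direct call whose target is the method being compiled does not
need the dex cache: the register holding the current method already holds the
callee, so the code generators can skip the dex_cache_resolved_methods_ loads
and fetch the entry point directly from it. The builder detects this case by
comparing the target's dex_method_index with the dex method index of the
compilation unit and records it on HInvokeStaticOrDirect.

The sketch below only illustrates the call sequence the backends emit; it is
not ART code. ArtMethodLike, entry_point and CallStaticOrDirect are made-up
names standing in for the real ArtMethod layout and the generated assembly:

  #include <cstdint>

  // Hypothetical model of the dispatch emitted for HInvokeStaticOrDirect.
  struct ArtMethodLike {
    ArtMethodLike** dex_cache_resolved_methods;  // dex_cache_resolved_methods_
    void (*entry_point)();  // entry_point_from_quick_compiled_code_
  };

  void CallStaticOrDirect(ArtMethodLike* current_method,
                          uint32_t index_in_cache,
                          bool is_recursive) {
    ArtMethodLike* callee = current_method;  // temp = method
    if (!is_recursive) {
      // temp = temp->dex_cache_resolved_methods_[index_in_cache]
      callee = current_method->dex_cache_resolved_methods[index_in_cache];
    }
    // (temp + offset_of_quick_compiled_code)()
    callee->entry_point();
  }

For recursive calls the two dependent loads disappear; the entry point load
and the call are still emitted, which is why they stay outside the new
conditional in every backend.
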
Change-Id: Ic4054b6c38f0a2a530ba6ef747647f86cee0b1b8
diff --git a/compiler/optimizing/builder.cc b/compiler/optimizing/builder.cc
index f9054e0..ef93b8c 100644
--- a/compiler/optimizing/builder.cc
+++ b/compiler/optimizing/builder.cc
@@ -613,9 +613,11 @@
// Sharpening to kDirect only works if we compile PIC.
DCHECK((optimized_invoke_type == invoke_type) || (optimized_invoke_type != kDirect)
|| compiler_driver_->GetCompilerOptions().GetCompilePic());
+ bool is_recursive =
+ (target_method.dex_method_index == dex_compilation_unit_->GetDexMethodIndex());
invoke = new (arena_) HInvokeStaticOrDirect(
arena_, number_of_arguments, return_type, dex_pc, target_method.dex_method_index,
- optimized_invoke_type);
+ is_recursive, optimized_invoke_type);
}
size_t start_index = 0;
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index 002d9d4..499f068 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -1194,13 +1194,15 @@
- // temp = temp->dex_cache_resolved_methods_;
- __ LoadFromOffset(
- kLoadWord, temp, temp, mirror::ArtMethod::DexCacheResolvedMethodsOffset().Int32Value());
- // temp = temp[index_in_cache]
- __ LoadFromOffset(
- kLoadWord, temp, temp, CodeGenerator::GetCacheOffset(invoke->GetIndexInDexCache()));
- // LR = temp[offset_of_quick_compiled_code]
- __ LoadFromOffset(kLoadWord, LR, temp,
- mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(
- kArmWordSize).Int32Value());
+ if (!invoke->GetIsRecursive()) {
+ // temp = temp->dex_cache_resolved_methods_;
+ __ LoadFromOffset(
+ kLoadWord, temp, temp, mirror::ArtMethod::DexCacheResolvedMethodsOffset().Int32Value());
+ // temp = temp[index_in_cache]
+ __ LoadFromOffset(
+ kLoadWord, temp, temp, CodeGenerator::GetCacheOffset(invoke->GetIndexInDexCache()));
+ }
+ // LR = temp[offset_of_quick_compiled_code]
+ __ LoadFromOffset(kLoadWord, LR, temp,
+ mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(
+ kArmWordSize).Int32Value());
// LR()
__ blx(LR);
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index c7517d3..7de162e 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -1816,10 +1816,12 @@
// temp = method;
codegen_->LoadCurrentMethod(temp);
- // temp = temp->dex_cache_resolved_methods_;
- __ Ldr(temp, HeapOperand(temp, mirror::ArtMethod::DexCacheResolvedMethodsOffset()));
- // temp = temp[index_in_cache];
- __ Ldr(temp, HeapOperand(temp, index_in_cache));
+ if (!invoke->GetIsRecursive()) {
+ // temp = temp->dex_cache_resolved_methods_;
+ __ Ldr(temp, HeapOperand(temp, mirror::ArtMethod::DexCacheResolvedMethodsOffset()));
+ // temp = temp[index_in_cache];
+ __ Ldr(temp, HeapOperand(temp, index_in_cache));
+ }
// lr = temp->entry_point_from_quick_compiled_code_;
__ Ldr(lr, HeapOperand(temp, mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(
kArm64WordSize)));
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index e7edd8a..c17b7dc 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -1131,10 +1131,12 @@
// temp = method;
codegen_->LoadCurrentMethod(temp);
- // temp = temp->dex_cache_resolved_methods_;
- __ movl(temp, Address(temp, mirror::ArtMethod::DexCacheResolvedMethodsOffset().Int32Value()));
- // temp = temp[index_in_cache]
- __ movl(temp, Address(temp, CodeGenerator::GetCacheOffset(invoke->GetIndexInDexCache())));
+ if (!invoke->GetIsRecursive()) {
+ // temp = temp->dex_cache_resolved_methods_;
+ __ movl(temp, Address(temp, mirror::ArtMethod::DexCacheResolvedMethodsOffset().Int32Value()));
+ // temp = temp[index_in_cache]
+ __ movl(temp, Address(temp, CodeGenerator::GetCacheOffset(invoke->GetIndexInDexCache())));
+ }
// (temp + offset_of_quick_compiled_code)()
__ call(Address(
temp, mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(kX86WordSize).Int32Value()));
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index ff7fcdc..6dab4b3 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -1137,10 +1137,12 @@
// temp = method;
codegen_->LoadCurrentMethod(temp);
- // temp = temp->dex_cache_resolved_methods_;
- __ movl(temp, Address(temp, mirror::ArtMethod::DexCacheResolvedMethodsOffset().SizeValue()));
- // temp = temp[index_in_cache]
- __ movl(temp, Address(temp, CodeGenerator::GetCacheOffset(invoke->GetIndexInDexCache())));
+ if (!invoke->GetIsRecursive()) {
+ // temp = temp->dex_cache_resolved_methods_;
+ __ movl(temp, Address(temp, mirror::ArtMethod::DexCacheResolvedMethodsOffset().SizeValue()));
+ // temp = temp[index_in_cache]
+ __ movl(temp, Address(temp, CodeGenerator::GetCacheOffset(invoke->GetIndexInDexCache())));
+ }
// (temp + offset_of_quick_compiled_code)()
__ call(Address(temp, mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(
kX86_64WordSize).SizeValue()));
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index c963b70..8ae83eb 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -1630,19 +1630,23 @@
Primitive::Type return_type,
uint32_t dex_pc,
uint32_t index_in_dex_cache,
+ bool is_recursive,
InvokeType invoke_type)
: HInvoke(arena, number_of_arguments, return_type, dex_pc),
index_in_dex_cache_(index_in_dex_cache),
- invoke_type_(invoke_type) {}
+ invoke_type_(invoke_type),
+ is_recursive_(is_recursive) {}
uint32_t GetIndexInDexCache() const { return index_in_dex_cache_; }
InvokeType GetInvokeType() const { return invoke_type_; }
+ bool GetIsRecursive() const { return is_recursive_; }
DECLARE_INSTRUCTION(InvokeStaticOrDirect);
private:
const uint32_t index_in_dex_cache_;
const InvokeType invoke_type_;
+ const bool is_recursive_;
DISALLOW_COPY_AND_ASSIGN(HInvokeStaticOrDirect);
};